summaryrefslogtreecommitdiff
path: root/nova/tests/unit/virt
diff options
context:
space:
mode:
Diffstat (limited to 'nova/tests/unit/virt')
-rw-r--r--nova/tests/unit/virt/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/mount/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/mount/test_loop.py98
-rw-r--r--nova/tests/unit/virt/disk/mount/test_nbd.py331
-rw-r--r--nova/tests/unit/virt/disk/test_api.py153
-rw-r--r--nova/tests/unit/virt/disk/test_inject.py284
-rw-r--r--nova/tests/unit/virt/disk/vfs/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/vfs/fakeguestfs.py188
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_guestfs.py264
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_localfs.py385
-rw-r--r--nova/tests/unit/virt/hyperv/__init__.py0
-rw-r--r--nova/tests/unit/virt/hyperv/db_fakes.py167
-rw-r--r--nova/tests/unit/virt/hyperv/fake.py90
-rw-r--r--nova/tests/unit/virt/hyperv/test_basevolumeutils.py157
-rw-r--r--nova/tests/unit/virt/hyperv/test_hostutils.py97
-rw-r--r--nova/tests/unit/virt/hyperv/test_hypervapi.py1967
-rw-r--r--nova/tests/unit/virt/hyperv/test_ioutils.py61
-rw-r--r--nova/tests/unit/virt/hyperv/test_migrationops.py79
-rw-r--r--nova/tests/unit/virt/hyperv/test_networkutils.py82
-rw-r--r--nova/tests/unit/virt/hyperv/test_networkutilsv2.py45
-rw-r--r--nova/tests/unit/virt/hyperv/test_pathutils.py58
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py28
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py37
-rw-r--r--nova/tests/unit/virt/hyperv/test_utilsfactory.py57
-rw-r--r--nova/tests/unit/virt/hyperv/test_vhdutils.py161
-rw-r--r--nova/tests/unit/virt/hyperv/test_vhdutilsv2.py249
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmops.py230
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmutils.py668
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmutilsv2.py197
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeutils.py151
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeutilsv2.py147
-rw-r--r--nova/tests/unit/virt/ironic/__init__.py0
-rw-r--r--nova/tests/unit/virt/ironic/test_client_wrapper.py126
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py1268
-rw-r--r--nova/tests/unit/virt/ironic/test_patcher.py139
-rw-r--r--nova/tests/unit/virt/ironic/utils.py115
-rw-r--r--nova/tests/unit/virt/libvirt/__init__.py0
-rw-r--r--nova/tests/unit/virt/libvirt/fake_imagebackend.py75
-rw-r--r--nova/tests/unit/virt/libvirt/fake_libvirt_utils.py211
-rw-r--r--nova/tests/unit/virt/libvirt/fakelibvirt.py1108
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py991
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py2344
-rw-r--r--nova/tests/unit/virt/libvirt/test_designer.py30
-rw-r--r--nova/tests/unit/virt/libvirt/test_dmcrypt.py72
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py12576
-rw-r--r--nova/tests/unit/virt/libvirt/test_fakelibvirt.py386
-rw-r--r--nova/tests/unit/virt/libvirt/test_firewall.py749
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py1309
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagecache.py887
-rw-r--r--nova/tests/unit/virt/libvirt/test_lvm.py183
-rw-r--r--nova/tests/unit/virt/libvirt/test_rbd.py283
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py652
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py959
-rw-r--r--nova/tests/unit/virt/libvirt/test_volume.py1160
-rw-r--r--nova/tests/unit/virt/test_block_device.py684
-rw-r--r--nova/tests/unit/virt/test_configdrive.py30
-rw-r--r--nova/tests/unit/virt/test_diagnostics.py231
-rw-r--r--nova/tests/unit/virt/test_driver.py58
-rw-r--r--nova/tests/unit/virt/test_events.py36
-rw-r--r--nova/tests/unit/virt/test_hardware.py1439
-rw-r--r--nova/tests/unit/virt/test_imagecache.py122
-rw-r--r--nova/tests/unit/virt/test_images.py45
-rw-r--r--nova/tests/unit/virt/test_virt.py287
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py881
-rw-r--r--nova/tests/unit/virt/test_volumeutils.py47
-rw-r--r--nova/tests/unit/virt/vmwareapi/__init__.py0
-rw-r--r--nova/tests/unit/virt/vmwareapi/fake.py1606
-rw-r--r--nova/tests/unit/virt/vmwareapi/stubs.py131
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_configdrive.py168
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py2650
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util.py548
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py163
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_imagecache.py277
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_images.py216
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_io_util.py33
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_read_write_util.py39
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vif.py346
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vim_util.py117
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vm_util.py1069
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vmops.py1293
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_volumeops.py95
-rw-r--r--nova/tests/unit/virt/xenapi/__init__.py0
-rw-r--r--nova/tests/unit/virt/xenapi/client/__init__.py0
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_objects.py113
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_session.py158
-rw-r--r--nova/tests/unit/virt/xenapi/image/__init__.py0
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_bittorrent.py163
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_glance.py256
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_utils.py252
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py182
-rw-r--r--nova/tests/unit/virt/xenapi/stubs.py365
-rw-r--r--nova/tests/unit/virt/xenapi/test_agent.py468
-rw-r--r--nova/tests/unit/virt/xenapi/test_driver.py101
-rw-r--r--nova/tests/unit/virt/xenapi/test_network_utils.py76
-rw-r--r--nova/tests/unit/virt/xenapi/test_vm_utils.py2422
-rw-r--r--nova/tests/unit/virt/xenapi/test_vmops.py1124
-rw-r--r--nova/tests/unit/virt/xenapi/test_volume_utils.py232
-rw-r--r--nova/tests/unit/virt/xenapi/test_volumeops.py549
-rw-r--r--nova/tests/unit/virt/xenapi/test_xenapi.py4105
-rw-r--r--nova/tests/unit/virt/xenapi/vm_rrd.xml1101
101 files changed, 56332 insertions, 0 deletions
diff --git a/nova/tests/unit/virt/__init__.py b/nova/tests/unit/virt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/__init__.py
diff --git a/nova/tests/unit/virt/disk/__init__.py b/nova/tests/unit/virt/disk/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/disk/__init__.py
diff --git a/nova/tests/unit/virt/disk/mount/__init__.py b/nova/tests/unit/virt/disk/mount/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/disk/mount/__init__.py
diff --git a/nova/tests/unit/virt/disk/mount/test_loop.py b/nova/tests/unit/virt/disk/mount/test_loop.py
new file mode 100644
index 0000000000..6375c9386b
--- /dev/null
+++ b/nova/tests/unit/virt/disk/mount/test_loop.py
@@ -0,0 +1,98 @@
+# Copyright 2012 Michael Still
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+
+from nova import test
+from nova.virt.disk.mount import loop
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+def _fake_trycmd_losetup_works(*args, **kwargs):
+ return '/dev/loop0', ''
+
+
+def _fake_trycmd_losetup_fails(*args, **kwards):
+ return '', 'doh'
+
+
+class LoopTestCase(test.NoDBTestCase):
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_works))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
+ _fake_noop))
+
+ # No error logged, device consumed
+ self.assertTrue(l.get_dev())
+ self.assertTrue(l.linked)
+ self.assertEqual('', l.error)
+ self.assertEqual('/dev/loop0', l.device)
+
+ # Free
+ l.unget_dev()
+ self.assertFalse(l.linked)
+ self.assertEqual('', l.error)
+ self.assertIsNone(l.device)
+
+ def test_inner_get_dev_fails(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_fails))
+
+ # No error logged, device consumed
+ self.assertFalse(l._inner_get_dev())
+ self.assertFalse(l.linked)
+ self.assertNotEqual('', l.error)
+ self.assertIsNone(l.device)
+
+ # Free
+ l.unget_dev()
+ self.assertFalse(l.linked)
+ self.assertIsNone(l.device)
+
+ def test_get_dev_timeout(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_fails))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
+ 'MAX_DEVICE_WAIT'), -10))
+
+ # Always fail to get a device
+ def fake_get_dev_fails():
+ return False
+ l._inner_get_dev = fake_get_dev_fails
+
+ # Fail to get a device
+ self.assertFalse(l.get_dev())
+
+ def test_unget_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
+ _fake_noop))
+
+ # This just checks that a free of something we don't have doesn't
+ # throw an exception
+ l.unget_dev()
diff --git a/nova/tests/unit/virt/disk/mount/test_nbd.py b/nova/tests/unit/virt/disk/mount/test_nbd.py
new file mode 100644
index 0000000000..d048511d16
--- /dev/null
+++ b/nova/tests/unit/virt/disk/mount/test_nbd.py
@@ -0,0 +1,331 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import tempfile
+import time
+
+import eventlet
+import fixtures
+
+from nova import test
+from nova.virt.disk.mount import nbd
+
+ORIG_EXISTS = os.path.exists
+ORIG_LISTDIR = os.listdir
+
+
+def _fake_exists_no_users(path):
+ if path.startswith('/sys/block/nbd'):
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_listdir_nbd_devices(path):
+ if path.startswith('/sys/block'):
+ return ['nbd0', 'nbd1']
+ return ORIG_LISTDIR(path)
+
+
+def _fake_exists_all_used(path):
+ if path.startswith('/sys/block/nbd'):
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_detect_nbd_devices_none(self):
+ return []
+
+
+def _fake_detect_nbd_devices(self):
+ return ['nbd0', 'nbd1']
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class NbdTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(NbdTestCase, self).setUp()
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices)
+ self.useFixture(fixtures.MonkeyPatch('os.listdir',
+ _fake_listdir_nbd_devices))
+
+ def test_nbd_no_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices_none)
+ n = nbd.NbdMount(None, tempdir)
+ self.assertIsNone(n._allocate_nbd())
+
+ def test_nbd_no_free_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_all_used))
+ self.assertIsNone(n._allocate_nbd())
+
+ def test_nbd_not_loaded(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+
+ # Fake out os.path.exists
+ def fake_exists(path):
+ if path.startswith('/sys/block/nbd'):
+ return False
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
+
+ # This should fail, as we don't have the module "loaded"
+ # TODO(mikal): work out how to force english as the gettext language
+ # so that the error check always passes
+ self.assertIsNone(n._allocate_nbd())
+ self.assertEqual('nbd unavailable: module not loaded', n.error)
+
+ def test_nbd_allocation(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+
+ # Allocate a nbd device
+ self.assertEqual('/dev/nbd0', n._allocate_nbd())
+
+ def test_nbd_allocation_one_in_use(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+
+ # Fake out os.path.exists
+ def fake_exists(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd0/pid':
+ return True
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
+
+ # Allocate a nbd device, should not be the in use one
+ # TODO(mikal): Note that there is a leak here, as the in use nbd device
+ # is removed from the list, but not returned so it will never be
+ # re-added. I will fix this in a later patch.
+ self.assertEqual('/dev/nbd1', n._allocate_nbd())
+
+ def test_inner_get_dev_no_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices_none)
+ n = nbd.NbdMount(None, tempdir)
+ self.assertFalse(n._inner_get_dev())
+
+ def test_inner_get_dev_qemu_fails(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+
+ # We have a trycmd that always fails
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ # Error logged, no device consumed
+ self.assertFalse(n._inner_get_dev())
+ self.assertTrue(n.error.startswith('qemu-nbd error'))
+
+ def test_inner_get_dev_qemu_timeout(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+
+ # We have a trycmd that always passed
+ def fake_trycmd(*args, **kwargs):
+ return '', ''
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+
+ # Error logged, no device consumed
+ self.assertFalse(n._inner_get_dev())
+ self.assertTrue(n.error.endswith('did not show up'))
+
+ def fake_exists_one(self, path):
+ # We need the pid file for the device which is allocated to exist, but
+ # only once it is allocated to us
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd1/pid':
+ return False
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+ def fake_trycmd_creates_pid(self, *args, **kwargs):
+ def fake_exists_two(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd0/pid':
+ return True
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ fake_exists_two))
+ return '', ''
+
+ def test_inner_get_dev_works(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+
+ # No error logged, device consumed
+ self.assertTrue(n._inner_get_dev())
+ self.assertTrue(n.linked)
+ self.assertEqual('', n.error)
+ self.assertEqual('/dev/nbd0', n.device)
+
+ # Free
+ n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEqual('', n.error)
+ self.assertIsNone(n.device)
+
+ def test_unget_dev_simple(self):
+ # This test is just checking we don't get an exception when we unget
+ # something we don't have
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ n.unget_dev()
+
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+
+ # No error logged, device consumed
+ self.assertTrue(n.get_dev())
+ self.assertTrue(n.linked)
+ self.assertEqual('', n.error)
+ self.assertEqual('/dev/nbd0', n.device)
+
+ # Free
+ n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEqual('', n.error)
+ self.assertIsNone(n.device)
+
+ def test_get_dev_timeout(self):
+ # Always fail to get a device
+ def fake_get_dev_fails(self):
+ return False
+ self.stubs.Set(nbd.NbdMount, '_inner_get_dev', fake_get_dev_fails)
+
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
+ 'MAX_DEVICE_WAIT'), -10))
+
+ # No error logged, device consumed
+ self.assertFalse(n.get_dev())
+
+ def test_do_mount_need_to_specify_fs_type(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # communicate a failed mount properly.
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ mount = nbd.NbdMount(imgfile.name, tempdir)
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ mount.get_dev = fake_returns_true
+ mount.map_dev = fake_returns_true
+
+ self.assertFalse(mount.do_mount())
+
+ def test_device_creation_race(self):
+ # Make sure that even if two threads create instances at the same time
+ # they cannot choose the same nbd number (see bug 1207422)
+
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ free_devices = _fake_detect_nbd_devices(None)[:]
+ chosen_devices = []
+
+ def fake_find_unused(self):
+ return os.path.join('/dev', free_devices[-1])
+
+ def delay_and_remove_device(*args, **kwargs):
+ # Ensure that context switch happens before the device is marked
+ # as used. This will cause a failure without nbd-allocation-lock
+ # in place.
+ time.sleep(0.1)
+
+ # We always choose the top device in find_unused - remove it now.
+ free_devices.pop()
+
+ return '', ''
+
+ def pid_exists(pidfile):
+ return pidfile not in [os.path.join('/sys/block', dev, 'pid')
+ for dev in free_devices]
+
+ self.stubs.Set(nbd.NbdMount, '_allocate_nbd', fake_find_unused)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ delay_and_remove_device))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ pid_exists))
+
+ def get_a_device():
+ n = nbd.NbdMount(None, tempdir)
+ n.get_dev()
+ chosen_devices.append(n.device)
+
+ thread1 = eventlet.spawn(get_a_device)
+ thread2 = eventlet.spawn(get_a_device)
+ thread1.wait()
+ thread2.wait()
+
+ self.assertEqual(2, len(chosen_devices))
+ self.assertNotEqual(chosen_devices[0], chosen_devices[1])
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
new file mode 100644
index 0000000000..1f62c33b51
--- /dev/null
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -0,0 +1,153 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tempfile
+
+import fixtures
+from oslo.concurrency import processutils
+
+from nova import test
+from nova import utils
+from nova.virt.disk import api
+from nova.virt.disk.mount import api as mount
+
+
+class FakeMount(object):
+ device = None
+
+ @staticmethod
+ def instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return FakeMount()
+
+ def get_dev(self):
+ pass
+
+ def unget_dev(self):
+ pass
+
+
+class APITestCase(test.NoDBTestCase):
+ def test_can_resize_need_fs_type_specified(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # treat a failure to mount as a failure to be able to resize the
+ # filesystem
+ def _fake_get_disk_size(path):
+ return 10
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.api.get_disk_size', _fake_get_disk_size))
+
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.get_dev',
+ fake_returns_true))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.map_dev',
+ fake_returns_true))
+
+ # Force the use of localfs, which is what was used during the failure
+ # reported in the bug
+ def fake_import_fails(*args, **kwargs):
+ raise Exception('Failed')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'oslo.utils.import_module',
+ fake_import_fails))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ self.assertFalse(api.is_image_partitionless(imgfile, use_cow=True))
+
+ def test_resize2fs_success(self):
+ imgfile = tempfile.NamedTemporaryFile()
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('e2fsck',
+ '-fp',
+ imgfile,
+ check_exit_code=[0, 1, 2],
+ run_as_root=False)
+ utils.execute('resize2fs',
+ imgfile,
+ check_exit_code=False,
+ run_as_root=False)
+
+ self.mox.ReplayAll()
+ api.resize2fs(imgfile)
+
+ def test_resize2fs_e2fsck_fails(self):
+ imgfile = tempfile.NamedTemporaryFile()
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('e2fsck',
+ '-fp',
+ imgfile,
+ check_exit_code=[0, 1, 2],
+ run_as_root=False).AndRaise(
+ processutils.ProcessExecutionError("fs error"))
+ self.mox.ReplayAll()
+ api.resize2fs(imgfile)
+
+ def test_extend_qcow_success(self):
+ imgfile = tempfile.NamedTemporaryFile()
+ imgsize = 10
+ device = "/dev/sdh"
+ use_cow = True
+
+ self.flags(resize_fs_using_block_device=True)
+ mounter = FakeMount.instance_for_format(
+ imgfile, None, None, 'qcow2')
+ mounter.device = device
+
+ self.mox.StubOutWithMock(api, 'can_resize_image')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(api, 'is_image_partitionless')
+ self.mox.StubOutWithMock(mounter, 'get_dev')
+ self.mox.StubOutWithMock(mounter, 'unget_dev')
+ self.mox.StubOutWithMock(api, 'resize2fs')
+ self.mox.StubOutWithMock(mount.Mount, 'instance_for_format')
+
+ api.can_resize_image(imgfile, imgsize).AndReturn(True)
+ utils.execute('qemu-img', 'resize', imgfile, imgsize)
+ api.is_image_partitionless(imgfile, use_cow).AndReturn(True)
+ mount.Mount.instance_for_format(
+ imgfile, None, None, 'qcow2').AndReturn(mounter)
+ mounter.get_dev().AndReturn(True)
+ api.resize2fs(mounter.device, run_as_root=True, check_exit_code=[0])
+ mounter.unget_dev()
+
+ self.mox.ReplayAll()
+ api.extend(imgfile, imgsize, use_cow=use_cow)
+
+ def test_extend_raw_success(self):
+ imgfile = tempfile.NamedTemporaryFile()
+ imgsize = 10
+ use_cow = False
+
+ self.mox.StubOutWithMock(api, 'can_resize_image')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(api, 'is_image_partitionless')
+ self.mox.StubOutWithMock(api, 'resize2fs')
+
+ api.can_resize_image(imgfile, imgsize).AndReturn(True)
+ utils.execute('qemu-img', 'resize', imgfile, imgsize)
+ api.is_image_partitionless(imgfile, use_cow).AndReturn(True)
+ api.resize2fs(imgfile, run_as_root=False, check_exit_code=[0])
+
+ self.mox.ReplayAll()
+ api.extend(imgfile, imgsize, use_cow=use_cow)
diff --git a/nova/tests/unit/virt/disk/test_inject.py b/nova/tests/unit/virt/disk/test_inject.py
new file mode 100644
index 0000000000..97c8a08013
--- /dev/null
+++ b/nova/tests/unit/virt/disk/test_inject.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.disk.vfs import fakeguestfs
+from nova.virt.disk import api as diskapi
+from nova.virt.disk.vfs import guestfs as vfsguestfs
+
+
+class VirtDiskTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VirtDiskTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsguestfs.guestfs = fakeguestfs
+
+ def test_inject_data(self):
+
+ self.assertTrue(diskapi.inject_data("/some/file", use_cow=True))
+
+ self.assertTrue(diskapi.inject_data("/some/file",
+ mandatory=('files',)))
+
+ self.assertTrue(diskapi.inject_data("/some/file", key="mysshkey",
+ mandatory=('key',)))
+
+ os_name = os.name
+ os.name = 'nt' # Cause password injection to fail
+ self.assertRaises(exception.NovaException,
+ diskapi.inject_data,
+ "/some/file", admin_password="p",
+ mandatory=('admin_password',))
+ self.assertFalse(diskapi.inject_data("/some/file", admin_password="p"))
+ os.name = os_name
+
+ self.assertFalse(diskapi.inject_data("/some/fail/file",
+ key="mysshkey"))
+
+ def test_inject_data_key(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/root/.ssh", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
+ self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o600})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "Hello World#!/bin/sh\n# Added by " +
+ "Nova to ensure injected ssh keys " +
+ "have the right context\nrestorecon " +
+ "-RF root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o700})
+
+ self.assertIn("/root/.ssh", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
+ self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o600})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux_append_with_newline(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "#!/bin/sh\necho done\n# Added "
+ "by Nova to ensure injected ssh keys have "
+ "the right context\nrestorecon -RF "
+ "root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o700})
+ vfs.teardown()
+
+ def test_inject_net(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_net_into_fs("mynetconfig", vfs)
+
+ self.assertIn("/etc/network/interfaces", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
+ {'content': 'mynetconfig',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_metadata(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_metadata_into_fs({"foo": "bar", "eek": "wizz"}, vfs)
+
+ self.assertIn("/meta.js", vfs.handle.files)
+ self.assertEqual({'content': '{"foo": "bar", ' +
+ '"eek": "wizz"}',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100},
+ vfs.handle.files["/meta.js"])
+ vfs.teardown()
+
+ def test_inject_admin_password(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ def fake_salt():
+ return "1234567890abcdef"
+
+ self.stubs.Set(diskapi, '_generate_salt', fake_salt)
+
+ vfs.handle.write("/etc/shadow",
+ "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n")
+
+ vfs.handle.write("/etc/passwd",
+ "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
+
+ diskapi._inject_admin_password_into_fs("123456", vfs)
+
+ self.assertEqual(vfs.handle.files["/etc/passwd"],
+ {'content': "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:" +
+ "/sbin/nologin\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ shadow = vfs.handle.files["/etc/shadow"]
+
+ # if the encrypted password is only 13 characters long, then
+ # nova.virt.disk.api:_set_password fell back to DES.
+ if len(shadow['content']) == 91:
+ self.assertEqual(shadow,
+ {'content': "root:12tir.zIbWQ3c" +
+ ":14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ else:
+ self.assertEqual(shadow,
+ {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
+ "vbFS88TEN0:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_files_into_fs(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_files_into_fs([("/path/to/not/exists/file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/path/to/not/exists", vfs.handle.files)
+ shadow_dir = vfs.handle.files["/path/to/not/exists"]
+ self.assertEqual(shadow_dir,
+ {"isdir": True,
+ "gid": 0,
+ "uid": 0,
+ "mode": 0o744})
+
+ shadow_file = vfs.handle.files["/path/to/not/exists/file"]
+ self.assertEqual(shadow_file,
+ {"isdir": False,
+ "content": "inject-file-contents",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700})
+ vfs.teardown()
+
+ def test_inject_files_into_fs_dir_exists(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ called = {'make_path': False}
+
+ def fake_has_file(*args, **kwargs):
+ return True
+
+ def fake_make_path(*args, **kwargs):
+ called['make_path'] = True
+
+ self.stubs.Set(vfs, 'has_file', fake_has_file)
+ self.stubs.Set(vfs, 'make_path', fake_make_path)
+
+ # test for already exists dir
+ diskapi._inject_files_into_fs([("/path/to/exists/file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/path/to/exists/file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ # test for root dir
+ diskapi._inject_files_into_fs([("/inject-file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/inject-file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ # test for null dir
+ vfs.handle.files.pop("/inject-file")
+ diskapi._inject_files_into_fs([("inject-file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/inject-file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ vfs.teardown()
diff --git a/nova/tests/unit/virt/disk/vfs/__init__.py b/nova/tests/unit/virt/disk/vfs/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/__init__.py
diff --git a/nova/tests/unit/virt/disk/vfs/fakeguestfs.py b/nova/tests/unit/virt/disk/vfs/fakeguestfs.py
new file mode 100644
index 0000000000..5e5efa7a14
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/fakeguestfs.py
@@ -0,0 +1,188 @@
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+EVENT_APPLIANCE = 0x1
+EVENT_LIBRARY = 0x2
+EVENT_WARNING = 0x3
+EVENT_TRACE = 0x4
+
+
+class GuestFS(object):
+ SUPPORT_CLOSE_ON_EXIT = True
+ SUPPORT_RETURN_DICT = True
+
+ def __init__(self, **kwargs):
+ if not self.SUPPORT_CLOSE_ON_EXIT and 'close_on_exit' in kwargs:
+ raise TypeError('close_on_exit')
+ if not self.SUPPORT_RETURN_DICT and 'python_return_dict' in kwargs:
+ raise TypeError('python_return_dict')
+
+ self._python_return_dict = kwargs.get('python_return_dict', False)
+ self.kwargs = kwargs
+ self.drives = []
+ self.running = False
+ self.closed = False
+ self.mounts = []
+ self.files = {}
+ self.auginit = False
+ self.root_mounted = False
+ self.backend_settings = None
+ self.trace_enabled = False
+ self.verbose_enabled = False
+ self.event_callback = None
+
+ def launch(self):
+ self.running = True
+
+ def shutdown(self):
+ self.running = False
+ self.mounts = []
+ self.drives = []
+
+ def set_backend_settings(self, settings):
+ self.backend_settings = settings
+
+ def close(self):
+ self.closed = True
+
+ def add_drive_opts(self, file, *args, **kwargs):
+ if file == "/some/fail/file":
+ raise RuntimeError("%s: No such file or directory" % file)
+
+ self.drives.append((file, kwargs['format']))
+
+ def add_drive(self, file, format=None, *args, **kwargs):
+ self.add_drive_opts(file, format=format, *args, **kwargs)
+
+ def inspect_os(self):
+ return ["/dev/guestvgf/lv_root"]
+
+ def inspect_get_mountpoints(self, dev):
+ mountpoints = [("/home", "/dev/mapper/guestvgf-lv_home"),
+ ("/", "/dev/mapper/guestvgf-lv_root"),
+ ("/boot", "/dev/vda1")]
+
+ if self.SUPPORT_RETURN_DICT and self._python_return_dict:
+ return dict(mountpoints)
+ else:
+ return mountpoints
+
+ def mount_options(self, options, device, mntpoint):
+ if mntpoint == "/":
+ self.root_mounted = True
+ else:
+ if not self.root_mounted:
+ raise RuntimeError(
+ "mount: %s: No such file or directory" % mntpoint)
+ self.mounts.append((options, device, mntpoint))
+
+ def mkdir_p(self, path):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": True,
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ def read_file(self, path):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ return self.files[path]["content"]
+
+ def write(self, path, content):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ self.files[path]["content"] = content
+
+ def write_append(self, path, content):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ self.files[path]["content"] = self.files[path]["content"] + content
+
+ def stat(self, path):
+ if path not in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ return self.files[path]["mode"]
+
+ def chown(self, uid, gid, path):
+ if path not in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ if uid != -1:
+ self.files[path]["uid"] = uid
+ if gid != -1:
+ self.files[path]["gid"] = gid
+
+ def chmod(self, mode, path):
+ if path not in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ self.files[path]["mode"] = mode
+
+ def aug_init(self, root, flags):
+ self.auginit = True
+
+ def aug_close(self):
+ self.auginit = False
+
+ def aug_get(self, cfgpath):
+ if not self.auginit:
+ raise RuntimeError("Augeas not initialized")
+
+ if cfgpath == "/files/etc/passwd/root/uid":
+ return 0
+ elif cfgpath == "/files/etc/passwd/fred/uid":
+ return 105
+ elif cfgpath == "/files/etc/passwd/joe/uid":
+ return 110
+ elif cfgpath == "/files/etc/group/root/gid":
+ return 0
+ elif cfgpath == "/files/etc/group/users/gid":
+ return 500
+ elif cfgpath == "/files/etc/group/admins/gid":
+ return 600
+ raise RuntimeError("Unknown path %s" % cfgpath)
+
+ def set_trace(self, enabled):
+ self.trace_enabled = enabled
+
+ def set_verbose(self, enabled):
+ self.verbose_enabled = enabled
+
+ def set_event_callback(self, func, events):
+ self.event_callback = (func, events)
diff --git a/nova/tests/unit/virt/disk/vfs/test_guestfs.py b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
new file mode 100644
index 0000000000..33dd100329
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
@@ -0,0 +1,264 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.disk.vfs import fakeguestfs
+from nova.virt.disk.vfs import guestfs as vfsimpl
+
+
+class VirtDiskVFSGuestFSTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VirtDiskVFSGuestFSTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsimpl.guestfs = fakeguestfs
+
+ def _do_test_appliance_setup_inspect(self, forcetcg):
+ if forcetcg:
+ vfsimpl.force_tcg()
+ else:
+ vfsimpl.force_tcg(False)
+
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ vfs.setup()
+
+ if forcetcg:
+ self.assertEqual("force_tcg", vfs.handle.backend_settings)
+ vfsimpl.force_tcg(False)
+ else:
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(3, len(vfs.handle.mounts))
+ self.assertEqual("/dev/mapper/guestvgf-lv_root",
+ vfs.handle.mounts[0][1])
+ self.assertEqual("/dev/vda1",
+ vfs.handle.mounts[1][1])
+ self.assertEqual("/dev/mapper/guestvgf-lv_home",
+ vfs.handle.mounts[2][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+ self.assertEqual("/boot", vfs.handle.mounts[1][2])
+ self.assertEqual("/home", vfs.handle.mounts[2][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_appliance_setup_inspect_auto(self):
+ self._do_test_appliance_setup_inspect(False)
+
+ def test_appliance_setup_inspect_tcg(self):
+ self._do_test_appliance_setup_inspect(True)
+
+ def test_appliance_setup_inspect_no_root_raises(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ # call setup to init the handle so we can stub it
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ def fake_inspect_os():
+ return []
+
+ self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
+ self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
+
+ def test_appliance_setup_inspect_multi_boots_raises(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ # call setup to init the handle so we can stub it
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ def fake_inspect_os():
+ return ['fake1', 'fake2']
+
+ self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
+ self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
+
+ def test_appliance_setup_static_nopart(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=None)
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(1, len(vfs.handle.mounts))
+ self.assertEqual("/dev/sda", vfs.handle.mounts[0][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_appliance_setup_static_part(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=2)
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(1, len(vfs.handle.mounts))
+ self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_makepath(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertIn("/some/dir", vfs.handle.files)
+ self.assertIn("/other/dir", vfs.handle.files)
+ self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
+ self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
+
+ vfs.teardown()
+
+ def test_append_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertIn("/some/file", vfs.handle.files)
+ self.assertEqual("Hello World Goodbye",
+ vfs.handle.files["/some/file"]["content"])
+
+ vfs.teardown()
+
+ def test_replace_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertIn("/some/file", vfs.handle.files)
+ self.assertEqual("Goodbye",
+ vfs.handle.files["/some/file"]["content"])
+
+ vfs.teardown()
+
+ def test_read_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertEqual("Hello World", vfs.read_file("/some/file"))
+
+ vfs.teardown()
+
+ def test_has_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ vfs.teardown()
+
+ def test_set_permissions(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"])
+
+ vfs.set_permissions("/some/file", 0o7777)
+ self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"])
+
+ vfs.teardown()
+
+ def test_set_ownership(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEqual(100, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(500, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEqual(110, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(600, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.teardown()
+
+ def test_close_on_error(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.kwargs['close_on_exit'])
+ vfs.teardown()
+ self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False)
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertNotIn('close_on_exit', vfs.handle.kwargs)
+ vfs.teardown()
+
+ def test_python_return_dict(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.kwargs['python_return_dict'])
+ vfs.teardown()
+ self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False)
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertNotIn('python_return_dict', vfs.handle.kwargs)
+ vfs.teardown()
+
+ def test_setup_debug_disable(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.trace_enabled)
+ self.assertFalse(vfs.handle.verbose_enabled)
+ self.assertIsNone(vfs.handle.event_callback)
+
+ def test_setup_debug_enabled(self):
+ self.flags(debug=True, group='guestfs')
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertTrue(vfs.handle.trace_enabled)
+ self.assertTrue(vfs.handle.verbose_enabled)
+ self.assertIsNotNone(vfs.handle.event_callback)
diff --git a/nova/tests/unit/virt/disk/vfs/test_localfs.py b/nova/tests/unit/virt/disk/vfs/test_localfs.py
new file mode 100644
index 0000000000..6e7780e74b
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/test_localfs.py
@@ -0,0 +1,385 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit import utils as tests_utils
+import nova.utils
+from nova.virt.disk.vfs import localfs as vfsimpl
+
+CONF = cfg.CONF
+
+dirs = []
+files = {}
+commands = []
+
+
+def fake_execute(*args, **kwargs):
+ commands.append({"args": args, "kwargs": kwargs})
+
+ if args[0] == "readlink":
+ if args[1] == "-nm":
+ if args[2] in ["/scratch/dir/some/file",
+ "/scratch/dir/some/dir",
+ "/scratch/dir/other/dir",
+ "/scratch/dir/other/file"]:
+ return args[2], ""
+ elif args[1] == "-e":
+ if args[2] in files:
+ return args[2], ""
+
+ return "", "No such file"
+ elif args[0] == "mkdir":
+ dirs.append(args[2])
+ elif args[0] == "chown":
+ owner = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ sep = owner.find(':')
+ if sep != -1:
+ user = owner[0:sep]
+ group = owner[sep + 1:]
+ else:
+ user = owner
+ group = None
+
+ if user:
+ if user == "fred":
+ uid = 105
+ else:
+ uid = 110
+ files[path]["uid"] = uid
+ if group:
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chgrp":
+ group = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chmod":
+ mode = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ files[path]["mode"] = int(mode, 8)
+ elif args[0] == "cat":
+ path = args[1]
+ if path not in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+ return files[path]["content"], ""
+ elif args[0] == "tee":
+ if args[1] == "-a":
+ path = args[2]
+ append = True
+ else:
+ path = args[1]
+ append = False
+ if path not in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700,
+ }
+ if append:
+ files[path]["content"] += kwargs["process_input"]
+ else:
+ files[path]["content"] = kwargs["process_input"]
+
+
+class VirtDiskVFSLocalFSTestPaths(test.NoDBTestCase):
+ def setUp(self):
+ super(VirtDiskVFSLocalFSTestPaths, self).setUp()
+
+ real_execute = processutils.execute
+
+ def nonroot_execute(*cmd_parts, **kwargs):
+ kwargs.pop('run_as_root', None)
+ return real_execute(*cmd_parts, **kwargs)
+
+ self.stubs.Set(processutils, 'execute', nonroot_execute)
+
+ def test_check_safe_path(self):
+ if not tests_utils.coreutils_readlink_available():
+ self.skipTest("coreutils readlink(1) unavailable")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ ret = vfs._canonical_path('etc/something.conf')
+ self.assertEqual(ret, '/foo/etc/something.conf')
+
+ def test_check_unsafe_path(self):
+ if not tests_utils.coreutils_readlink_available():
+ self.skipTest("coreutils readlink(1) unavailable")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ self.assertRaises(exception.Invalid,
+ vfs._canonical_path,
+ 'etc/../../../something.conf')
+
+
+class VirtDiskVFSLocalFSTest(test.NoDBTestCase):
+ def test_makepath(self):
+ global dirs, commands
+ dirs = []
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertEqual(dirs,
+ ["/scratch/dir/some/dir", "/scratch/dir/other/dir"]),
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_append_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertIn("/scratch/dir/some/file", files)
+ self.assertEqual(files["/scratch/dir/some/file"]["content"],
+ "Hello World Goodbye")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('tee', '-a',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': ' Goodbye',
+ 'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_replace_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertIn("/scratch/dir/some/file", files)
+ self.assertEqual(files["/scratch/dir/some/file"]["content"],
+ "Goodbye")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('tee', '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': 'Goodbye',
+ 'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_read_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ self.assertEqual(vfs.read_file("/some/file"), "Hello World")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_has_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ ])
+
+ def test_set_permissions(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ vfs.set_permissions("/some/file", 0o777)
+ self.assertEqual(files["/scratch/dir/some/file"]["mode"], 0o777)
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chmod', '777',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_set_ownership(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 100)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 500)
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 110)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 600)
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chown', 'fred',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chgrp', 'users',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chown', 'joe:admins',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
diff --git a/nova/tests/unit/virt/hyperv/__init__.py b/nova/tests/unit/virt/hyperv/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/__init__.py
diff --git a/nova/tests/unit/virt/hyperv/db_fakes.py b/nova/tests/unit/virt/hyperv/db_fakes.py
new file mode 100644
index 0000000000..9e8249323e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/db_fakes.py
@@ -0,0 +1,167 @@
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts, mocks and fixtures for the test suite
+"""
+
+import uuid
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import utils
+
+
+def get_fake_instance_data(name, project_id, user_id):
+ return {'name': name,
+ 'id': 1,
+ 'uuid': str(uuid.uuid4()),
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'flavor':
+ {'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 1024,
+ 'flavorid': 1,
+ 'rxtx_factor': 1}
+ }
+
+
+def get_fake_image_data(project_id, user_id):
+ return {'name': 'image1',
+ 'id': 1,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'flavor': 'm1.tiny',
+ }
+
+
+def get_fake_volume_info_data(target_portal, volume_id):
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': 1,
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
+ 'target_portal': target_portal,
+ 'target_lun': 1,
+ 'auth_method': 'CHAP',
+ }
+ }
+
+
+def get_fake_block_device_info(target_portal, volume_id):
+ return {'block_device_mapping': [{'connection_info': {
+ 'driver_volume_type': 'iscsi',
+ 'data': {'target_lun': 1,
+ 'volume_id': volume_id,
+ 'target_iqn':
+ 'iqn.2010-10.org.openstack:volume-' +
+ volume_id,
+ 'target_portal': target_portal,
+ 'target_discovered': False}},
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}],
+ 'root_device_name': 'fake_root_device_name',
+ 'ephemerals': [],
+ 'swap': None
+ }
+
+
+def stub_out_db_instance_api(stubs):
+ """Stubs out the db API for creating Instances."""
+
+ FLAVORS = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
+ 'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
+ 'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
+
+ class FakeModel(object):
+ """Stubs out for model."""
+
+ def __init__(self, values):
+ self.values = values
+
+ def get(self, key, default=None):
+ if key in self.values:
+ return self.values[key]
+ else:
+ return default
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ def __setitem__(self, key, value):
+ self.values[key] = value
+
+ def __str__(self):
+ return str(self.values)
+
+ def fake_instance_create(context, values):
+ """Stubs out the db.instance_create method."""
+
+ if 'flavor' not in values:
+ return
+
+ flavor = values['flavor']
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'uuid': str(uuid.uuid4()),
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_ref': values['image_ref'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'flavor': flavor,
+ 'memory_mb': flavor['memory_mb'],
+ 'vcpus': flavor['vcpus'],
+ 'mac_addresses': [{'address': values['mac_address']}],
+ 'root_gb': flavor['root_gb'],
+ 'system_metadata': {'image_shutdown_timeout': 0},
+ }
+ return FakeModel(base_options)
+
+ def fake_flavor_get_all(context, inactive=0, filters=None):
+ return FLAVORS.values()
+
+ def fake_flavor_get_by_name(context, name):
+ return FLAVORS[name]
+
+ def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
+ return {}
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'flavor_get_all', fake_flavor_get_all)
+ stubs.Set(db, 'flavor_get_by_name', fake_flavor_get_by_name)
+ stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
diff --git a/nova/tests/unit/virt/hyperv/fake.py b/nova/tests/unit/virt/hyperv/fake.py
new file mode 100644
index 0000000000..6403374aa5
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/fake.py
@@ -0,0 +1,90 @@
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import os
+
+
+class PathUtils(object):
+ def open(self, path, mode):
+ return io.BytesIO(b'fake content')
+
+ def exists(self, path):
+ return False
+
+ def makedirs(self, path):
+ pass
+
+ def remove(self, path):
+ pass
+
+ def rename(self, src, dest):
+ pass
+
+ def copyfile(self, src, dest):
+ pass
+
+ def copy(self, src, dest):
+ pass
+
+ def rmtree(self, path):
+ pass
+
+ def get_instances_dir(self, remote_server=None):
+ return 'C:\\FakeInstancesPath\\'
+
+ def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
+ remove_dir=False):
+ return os.path.join(self.get_instances_dir(), instance_name, '_revert')
+
+ def get_instance_dir(self, instance_name, remote_server=None,
+ create_dir=True, remove_dir=False):
+ return os.path.join(self.get_instances_dir(remote_server),
+ instance_name)
+
+ def lookup_root_vhd_path(self, instance_name):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'root.vhd')
+
+ def lookup_configdrive_path(self, instance_name):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'configdrive.iso')
+
+ def lookup_ephemeral_vhd_path(self, instance_name):
+ instance_path = self.get_instance_dir(instance_name)
+ if instance_path:
+ return os.path.join(instance_path, 'ephemeral.vhd')
+
+ def get_root_vhd_path(self, instance_name, format_ext):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'root.' + format_ext)
+
+ def get_ephemeral_vhd_path(self, instance_name, format_ext):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
+
+ def get_base_vhd_dir(self):
+ return os.path.join(self.get_instances_dir(), '_base')
+
+ def get_export_dir(self, instance_name):
+ export_dir = os.path.join(self.get_instances_dir(), 'export',
+ instance_name)
+ return export_dir
+
+ def vhd_exists(self, path):
+ return False
+
+ def get_vm_console_log_paths(self, vm_name, remote_server=None):
+ return 'fake_vm_log_path'
diff --git a/nova/tests/unit/virt/hyperv/test_basevolumeutils.py b/nova/tests/unit/virt/hyperv/test_basevolumeutils.py
new file mode 100644
index 0000000000..8f48515d09
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_basevolumeutils.py
@@ -0,0 +1,157 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import basevolumeutils
+
+
+def _exception_thrower():
+ raise Exception("Testing exception handling.")
+
+
+class BaseVolumeUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V BaseVolumeUtils class."""
+
+ _FAKE_COMPUTER_NAME = "fake_computer_name"
+ _FAKE_DOMAIN_NAME = "fake_domain_name"
+ _FAKE_INITIATOR_NAME = "fake_initiator_name"
+ _FAKE_INITIATOR_IQN_NAME = "iqn.1991-05.com.microsoft:fake_computer_name"
+ _FAKE_DISK_PATH = 'fake_path DeviceID="123\\\\2"'
+ _FAKE_MOUNT_DEVICE = '/dev/fake/mount'
+ _FAKE_DEVICE_NAME = '/dev/fake/path'
+ _FAKE_SWAP = {'device_name': _FAKE_DISK_PATH}
+
+ def setUp(self):
+ self._volutils = basevolumeutils.BaseVolumeUtils()
+ self._volutils._conn_wmi = mock.MagicMock()
+ self._volutils._conn_cimv2 = mock.MagicMock()
+
+ super(BaseVolumeUtilsTestCase, self).setUp()
+
+ def test_get_iscsi_initiator_ok(self):
+ self._check_get_iscsi_initiator(
+ mock.MagicMock(return_value=mock.sentinel.FAKE_KEY),
+ self._FAKE_INITIATOR_NAME)
+
+ def test_get_iscsi_initiator_exception(self):
+ initiator_name = "%(iqn)s.%(domain)s" % {
+ 'iqn': self._FAKE_INITIATOR_IQN_NAME,
+ 'domain': self._FAKE_DOMAIN_NAME
+ }
+
+ self._check_get_iscsi_initiator(_exception_thrower, initiator_name)
+
+ def _check_get_iscsi_initiator(self, winreg_method, expected):
+ mock_computer = mock.MagicMock()
+ mock_computer.name = self._FAKE_COMPUTER_NAME
+ mock_computer.Domain = self._FAKE_DOMAIN_NAME
+ self._volutils._conn_cimv2.Win32_ComputerSystem.return_value = [
+ mock_computer]
+
+ with mock.patch.object(basevolumeutils,
+ '_winreg', create=True) as mock_winreg:
+ mock_winreg.OpenKey = winreg_method
+ mock_winreg.QueryValueEx = mock.MagicMock(return_value=[expected])
+
+ initiator_name = self._volutils.get_iscsi_initiator()
+ self.assertEqual(expected, initiator_name)
+
+ @mock.patch.object(basevolumeutils, 'driver')
+ def test_volume_in_mapping(self, mock_driver):
+ mock_driver.block_device_info_get_mapping.return_value = [
+ {'mount_device': self._FAKE_MOUNT_DEVICE}]
+ mock_driver.block_device_info_get_swap = mock.MagicMock(
+ return_value=self._FAKE_SWAP)
+ mock_driver.block_device_info_get_ephemerals = mock.MagicMock(
+ return_value=[{'device_name': self._FAKE_DEVICE_NAME}])
+
+ mock_driver.swap_is_usable = mock.MagicMock(return_value=True)
+
+ self.assertTrue(self._volutils.volume_in_mapping(
+ self._FAKE_MOUNT_DEVICE, mock.sentinel.FAKE_BLOCK_DEVICE_INFO))
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ "_get_drive_number_from_disk_path")
+ def test_get_session_id_from_mounted_disk(self, mock_get_session_id):
+ mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
+ mock_initiator_session = self._create_initiator_session()
+ self._volutils._conn_wmi.query.return_value = [mock_initiator_session]
+ session_id = self._volutils.get_session_id_from_mounted_disk(
+ self._FAKE_DISK_PATH)
+
+ self.assertEqual(mock.sentinel.FAKE_SESSION_ID, session_id)
+
+ def test_get_devices_for_target(self):
+ init_session = self._create_initiator_session()
+ self._volutils._conn_wmi.query.return_value = [init_session]
+ devices = self._volutils._get_devices_for_target(
+ mock.sentinel.FAKE_IQN)
+
+ self.assertEqual(init_session.Devices, devices)
+
+ def test_get_devices_for_target_not_found(self):
+ self._volutils._conn_wmi.query.return_value = None
+ devices = self._volutils._get_devices_for_target(
+ mock.sentinel.FAKE_IQN)
+
+ self.assertEqual(0, len(devices))
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ '_get_devices_for_target')
+ def test_get_device_number_for_target(self, fake_get_devices):
+ init_session = self._create_initiator_session()
+ fake_get_devices.return_value = init_session.Devices
+ device_number = self._volutils.get_device_number_for_target(
+ mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
+
+ self.assertEqual(mock.sentinel.FAKE_DEVICE_NUMBER, device_number)
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ '_get_devices_for_target')
+ def test_get_target_lun_count(self, fake_get_devices):
+ init_session = self._create_initiator_session()
+ fake_get_devices.return_value = [init_session]
+ lun_count = self._volutils.get_target_lun_count(
+ mock.sentinel.FAKE_IQN)
+
+ self.assertEqual(len(init_session.Devices), lun_count)
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ "_get_drive_number_from_disk_path")
+ def test_get_target_from_disk_path(self, mock_get_session_id):
+ mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
+ init_sess = self._create_initiator_session()
+ mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
+ mock_ses_class.return_value = [init_sess]
+
+ (target_name, scsi_lun) = self._volutils.get_target_from_disk_path(
+ self._FAKE_DISK_PATH)
+
+ self.assertEqual(mock.sentinel.FAKE_TARGET_NAME, target_name)
+ self.assertEqual(mock.sentinel.FAKE_LUN, scsi_lun)
+
+ def _create_initiator_session(self):
+ device = mock.MagicMock()
+ device.ScsiLun = mock.sentinel.FAKE_LUN
+ device.DeviceNumber = mock.sentinel.FAKE_DEVICE_NUMBER
+ device.TargetName = mock.sentinel.FAKE_TARGET_NAME
+ init_session = mock.MagicMock()
+ init_session.Devices = [device]
+ init_session.SessionId = mock.sentinel.FAKE_SESSION_ID
+
+ return init_session
diff --git a/nova/tests/unit/virt/hyperv/test_hostutils.py b/nova/tests/unit/virt/hyperv/test_hostutils.py
new file mode 100644
index 0000000000..998692d350
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_hostutils.py
@@ -0,0 +1,97 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import hostutils
+
+
+class FakeCPUSpec(object):
+ """Fake CPU Spec for unit tests."""
+
+ Architecture = mock.sentinel.cpu_arch
+ Name = mock.sentinel.cpu_name
+ Manufacturer = mock.sentinel.cpu_man
+ NumberOfCores = mock.sentinel.cpu_cores
+ NumberOfLogicalProcessors = mock.sentinel.cpu_procs
+
+
+class HostUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V hostutils class."""
+
+ _FAKE_MEMORY_TOTAL = 1024L
+ _FAKE_MEMORY_FREE = 512L
+ _FAKE_DISK_SIZE = 1024L
+ _FAKE_DISK_FREE = 512L
+ _FAKE_VERSION_GOOD = '6.2.0'
+ _FAKE_VERSION_BAD = '6.1.9'
+
+ def setUp(self):
+ self._hostutils = hostutils.HostUtils()
+ self._hostutils._conn_cimv2 = mock.MagicMock()
+
+ super(HostUtilsTestCase, self).setUp()
+
+ @mock.patch('nova.virt.hyperv.hostutils.ctypes')
+ def test_get_host_tick_count64(self, mock_ctypes):
+ tick_count64 = "100"
+ mock_ctypes.windll.kernel32.GetTickCount64.return_value = tick_count64
+ response = self._hostutils.get_host_tick_count64()
+ self.assertEqual(tick_count64, response)
+
+ def test_get_cpus_info(self):
+ cpu = mock.MagicMock(spec=FakeCPUSpec)
+ self._hostutils._conn_cimv2.query.return_value = [cpu]
+ cpu_list = self._hostutils.get_cpus_info()
+ self.assertEqual([cpu._mock_children], cpu_list)
+
+ def test_get_memory_info(self):
+ memory = mock.MagicMock()
+ type(memory).TotalVisibleMemorySize = mock.PropertyMock(
+ return_value=self._FAKE_MEMORY_TOTAL)
+ type(memory).FreePhysicalMemory = mock.PropertyMock(
+ return_value=self._FAKE_MEMORY_FREE)
+
+ self._hostutils._conn_cimv2.query.return_value = [memory]
+ total_memory, free_memory = self._hostutils.get_memory_info()
+
+ self.assertEqual(self._FAKE_MEMORY_TOTAL, total_memory)
+ self.assertEqual(self._FAKE_MEMORY_FREE, free_memory)
+
+ def test_get_volume_info(self):
+ disk = mock.MagicMock()
+ type(disk).Size = mock.PropertyMock(return_value=self._FAKE_DISK_SIZE)
+ type(disk).FreeSpace = mock.PropertyMock(
+ return_value=self._FAKE_DISK_FREE)
+
+ self._hostutils._conn_cimv2.query.return_value = [disk]
+ (total_memory, free_memory) = self._hostutils.get_volume_info(
+ mock.sentinel.FAKE_DRIVE)
+
+ self.assertEqual(self._FAKE_DISK_SIZE, total_memory)
+ self.assertEqual(self._FAKE_DISK_FREE, free_memory)
+
+ def test_check_min_windows_version_true(self):
+ self._test_check_min_windows_version(self._FAKE_VERSION_GOOD, True)
+
+ def test_check_min_windows_version_false(self):
+ self._test_check_min_windows_version(self._FAKE_VERSION_BAD, False)
+
+ def _test_check_min_windows_version(self, version, expected):
+ os = mock.MagicMock()
+ os.Version = version
+ self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [os]
+ self.assertEqual(expected,
+ self._hostutils.check_min_windows_version(6, 2))
diff --git a/nova/tests/unit/virt/hyperv/test_hypervapi.py b/nova/tests/unit/virt/hyperv/test_hypervapi.py
new file mode 100644
index 0000000000..375420a484
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_hypervapi.py
@@ -0,0 +1,1967 @@
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for the Hyper-V driver and related APIs.
+"""
+
+import contextlib
+import datetime
+import io
+import os
+import platform
+import shutil
+import time
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.utils import units
+
+from nova.api.metadata import base as instance_metadata
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.openstack.common import fileutils
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.virt.hyperv import db_fakes
+from nova.tests.unit.virt.hyperv import fake
+from nova import utils
+from nova.virt import configdrive
+from nova.virt import driver
+from nova.virt.hyperv import basevolumeutils
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import driver as driver_hyperv
+from nova.virt.hyperv import hostops
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import ioutils
+from nova.virt.hyperv import livemigrationutils
+from nova.virt.hyperv import networkutils
+from nova.virt.hyperv import networkutilsv2
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import rdpconsoleutils
+from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vhdutilsv2
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import vmutilsv2
+from nova.virt.hyperv import volumeops
+from nova.virt.hyperv import volumeutils
+from nova.virt.hyperv import volumeutilsv2
+from nova.virt import images
+
+CONF = cfg.CONF
+CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
+
+
+class HyperVAPIBaseTestCase(test.NoDBTestCase):
+ """Base unit tests class for Hyper-V driver calls."""
+
+ def __init__(self, test_case_name):
+ self._mox = mox.Mox()
+ super(HyperVAPIBaseTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(HyperVAPIBaseTestCase, self).setUp()
+
+ self._user_id = 'fake'
+ self._project_id = 'fake'
+ self._instance_data = None
+ self._image_metadata = None
+ self._fetched_image = None
+ self._update_image_raise_exception = False
+ self._volume_target_portal = 'testtargetportal:3260'
+ self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
+ self._context = context.RequestContext(self._user_id, self._project_id)
+ self._instance_ide_disks = []
+ self._instance_ide_dvds = []
+ self._instance_volume_disks = []
+ self._test_vm_name = None
+ self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
+ self._check_min_windows_version_satisfied = True
+
+ self._setup_stubs()
+
+ self.flags(instances_path=r'C:\Hyper-V\test\instances',
+ network_api_class='nova.network.neutronv2.api.API')
+ self.flags(force_volumeutils_v1=True, group='hyperv')
+ self.flags(force_hyperv_utils_v1=True, group='hyperv')
+
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def _setup_stubs(self):
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ fake_image.stub_out_image_service(self.stubs)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def fake_fetch(context, image_id, target, user, project):
+ self._fetched_image = target
+ self.stubs.Set(images, 'fetch', fake_fetch)
+
+ def fake_get_remote_image_service(context, name):
+ class FakeGlanceImageService(object):
+ def update(self_fake, context, image_id, image_metadata, f):
+ if self._update_image_raise_exception:
+ raise vmutils.HyperVException(
+ "Simulated update failure")
+ self._image_metadata = image_metadata
+ return (FakeGlanceImageService(), 1)
+ self.stubs.Set(glance, 'get_remote_image_service',
+ fake_get_remote_image_service)
+
+ def fake_check_min_windows_version(fake_self, major, minor):
+ if [major, minor] >= [6, 3]:
+ return False
+ return self._check_min_windows_version_satisfied
+ self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
+ fake_check_min_windows_version)
+
+ def fake_sleep(ms):
+ pass
+ self.stubs.Set(time, 'sleep', fake_sleep)
+
+ class FakeIOThread(object):
+ def __init__(self, src, dest, max_bytes):
+ pass
+
+ def start(self):
+ pass
+
+ self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
+ self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
+ self._mox.StubOutWithMock(fake.PathUtils, 'open')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copy')
+ self._mox.StubOutWithMock(fake.PathUtils, 'remove')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rename')
+ self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
+ self._mox.StubOutWithMock(fake.PathUtils,
+ 'get_instance_migr_revert_dir')
+ self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
+ self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
+
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'attach_volume_to_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_mounted_disk_by_drive_number')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_controller_volume_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'enable_vm_metrics_collection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_vm_serial_port_connection')
+
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils,
+ 'get_internal_vhd_size_by_file_size')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_format')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_dynamic_vhd')
+
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils,
+ 'is_cpu_feature_present')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
+
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'get_external_vswitch')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'create_vswitch_port')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'vswitch_port_needed')
+
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'live_migrate_vm')
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'check_live_migration_config')
+
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'volume_in_mapping')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_session_id_from_mounted_disk')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_device_number_for_target')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_from_disk_path')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_lun_count')
+
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'execute_log_out')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'get_iscsi_initiator')
+
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'execute_log_out')
+
+ self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
+ 'get_rdp_console_port')
+
+ self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
+ self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ 'metadata_for_config_drive')
+
+ # Can't use StubOutClassWithMocks due to __exit__ and __enter__
+ self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+
+ self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
+ self._mox.StubOutWithMock(utils, 'execute')
+
+ def tearDown(self):
+ self._mox.UnsetStubs()
+ super(HyperVAPIBaseTestCase, self).tearDown()
+
+
+class HyperVAPITestCase(HyperVAPIBaseTestCase):
+ """Unit tests for Hyper-V driver calls."""
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), self._conn)
+
+ def test_get_available_resource(self):
+ cpu_info = {'Architecture': 'fake',
+ 'Name': 'fake',
+ 'Manufacturer': 'ACME, Inc.',
+ 'NumberOfCores': 2,
+ 'NumberOfLogicalProcessors': 4}
+
+ tot_mem_kb = 2000000L
+ free_mem_kb = 1000000L
+
+ tot_hdd_b = 4L * 1024 ** 3
+ free_hdd_b = 3L * 1024 ** 3
+
+ windows_version = '6.2.9200'
+
+ hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
+ free_mem_kb))
+
+ m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
+ m.AndReturn((tot_hdd_b, free_hdd_b))
+
+ hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
+ m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
+ m.MultipleTimes()
+
+ m = hostutils.HostUtils.get_windows_version()
+ m.AndReturn(windows_version)
+
+ self._mox.ReplayAll()
+ dic = self._conn.get_available_resource(None)
+ self._mox.VerifyAll()
+
+ self.assertEqual(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
+ self.assertEqual(dic['hypervisor_hostname'], platform.node())
+ self.assertEqual(dic['memory_mb'], tot_mem_kb / units.Ki)
+ self.assertEqual(dic['memory_mb_used'],
+ tot_mem_kb / units.Ki - free_mem_kb / units.Ki)
+ self.assertEqual(dic['local_gb'], tot_hdd_b / units.Gi)
+ self.assertEqual(dic['local_gb_used'],
+ tot_hdd_b / units.Gi - free_hdd_b / units.Gi)
+ self.assertEqual(dic['hypervisor_version'],
+ windows_version.replace('.', ''))
+ self.assertEqual(dic['supported_instances'],
+ '[["i686", "hyperv", "hvm"], ["x86_64", "hyperv", "hvm"]]')
+
+ def test_list_instances(self):
+ fake_instances = ['fake1', 'fake2']
+ vmutils.VMUtils.list_instances().AndReturn(fake_instances)
+
+ self._mox.ReplayAll()
+ instances = self._conn.list_instances()
+ self._mox.VerifyAll()
+
+ self.assertEqual(instances, fake_instances)
+
+ def test_get_host_uptime(self):
+ fake_host = "fake_host"
+ with mock.patch.object(self._conn._hostops,
+ "get_host_uptime") as mock_uptime:
+ self._conn._hostops.get_host_uptime(fake_host)
+ mock_uptime.assert_called_once_with(fake_host)
+
+ def test_get_info(self):
+ self._instance_data = self._get_instance_data()
+
+ summary_info = {'NumberOfProcessors': 2,
+ 'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
+ 'MemoryUsage': 1000,
+ 'UpTime': 1}
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.get_vm_summary_info(func)
+ m.AndReturn(summary_info)
+
+ self._mox.ReplayAll()
+ info = self._conn.get_info(self._instance_data)
+ self._mox.VerifyAll()
+
+ self.assertEqual(info["state"], power_state.RUNNING)
+
+ def test_get_info_instance_not_found(self):
+ # Tests that InstanceNotFound is raised if the instance isn't found
+ # from the vmutils.vm_exists method.
+ self._instance_data = self._get_instance_data()
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(False)
+
+ self._mox.ReplayAll()
+ self.assertRaises(exception.InstanceNotFound, self._conn.get_info,
+ self._instance_data)
+ self._mox.VerifyAll()
+
+ def test_spawn_cow_image(self):
+ self._test_spawn_instance(True)
+
+ def test_spawn_cow_image_vhdx(self):
+ self._test_spawn_instance(True, vhd_format=constants.DISK_FORMAT_VHDX)
+
+ def test_spawn_no_cow_image(self):
+ self._test_spawn_instance(False)
+
+ def test_spawn_dynamic_memory(self):
+ CONF.set_override('dynamic_memory_ratio', 2.0, 'hyperv')
+ self._test_spawn_instance()
+
+ def test_spawn_no_cow_image_vhdx(self):
+ self._test_spawn_instance(False, vhd_format=constants.DISK_FORMAT_VHDX)
+
+ def _setup_spawn_config_drive_mocks(self, use_cdrom):
+ instance_metadata.InstanceMetadata(mox.IgnoreArg(),
+ content=mox.IsA(list),
+ extra_md=mox.IsA(dict))
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ cdb = self._mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.IsA(str))
+ cdb.__exit__(None, None, None).AndReturn(None)
+
+ if not use_cdrom:
+ utils.execute(CONF.hyperv.qemu_img_cmd,
+ 'convert',
+ '-f',
+ 'raw',
+ '-O',
+ 'vpc',
+ mox.IsA(str),
+ mox.IsA(str),
+ attempts=1)
+ fake.PathUtils.remove(mox.IsA(str))
+
+ m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk)
+
+ def _test_spawn_config_drive(self, use_cdrom, format_error=False):
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ if use_cdrom:
+ expected_ide_disks = 1
+ expected_ide_dvds = 1
+ else:
+ expected_ide_disks = 2
+ expected_ide_dvds = 0
+
+ if format_error:
+ self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
+ self._test_spawn_instance,
+ with_exception=True,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+ else:
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+
+ def test_spawn_config_drive(self):
+ self._test_spawn_config_drive(False)
+
+ def test_spawn_config_drive_format_error(self):
+ CONF.set_override('config_drive_format', 'wrong_format')
+ self._test_spawn_config_drive(True, True)
+
+ def test_spawn_config_drive_cdrom(self):
+ self._test_spawn_config_drive(True)
+
+ def test_spawn_no_config_drive(self):
+ self.flags(force_config_drive=False)
+
+ expected_ide_disks = 1
+ expected_ide_dvds = 0
+
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds)
+
+ def _test_spawn_nova_net_vif(self, with_port):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ fake_vswitch_path = 'fake vswitch path'
+ fake_vswitch_port = 'fake port'
+
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.hyperv.vswitch_name)
+ m.AndReturn(fake_vswitch_path)
+
+ m = networkutils.NetworkUtils.vswitch_port_needed()
+ m.AndReturn(with_port)
+
+ if with_port:
+ m = networkutils.NetworkUtils.create_vswitch_port(
+ fake_vswitch_path, mox.IsA(str))
+ m.AndReturn(fake_vswitch_port)
+ vswitch_conn_data = fake_vswitch_port
+ else:
+ vswitch_conn_data = fake_vswitch_path
+
+ vmutils.VMUtils.set_nic_connection(mox.IsA(str),
+ mox.IsA(str), vswitch_conn_data)
+
+ self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
+
+ def test_spawn_nova_net_vif_with_port(self):
+ self._test_spawn_nova_net_vif(True)
+
+ def test_spawn_nova_net_vif_without_port(self):
+ self._test_spawn_nova_net_vif(False)
+
+ def test_spawn_nova_net_vif_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.hyperv.vswitch_name)
+ m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
+
+ self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
+ setup_vif_mocks_func=setup_vif_mocks,
+ with_exception=True)
+
+ def test_spawn_with_metrics_collection(self):
+ self.flags(enable_instance_metrics_collection=True, group='hyperv')
+ self._test_spawn_instance(False)
+
+ def test_spawn_with_ephemeral_storage(self):
+ self._test_spawn_instance(True, expected_ide_disks=2,
+ ephemeral_storage=True)
+
+ def _check_instance_name(self, vm_name):
+ return vm_name == self._instance_data['name']
+
+ def _test_vm_state_change(self, action, from_state, to_state):
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ to_state)
+
+ if to_state in (constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_REBOOT):
+ self._setup_delete_vm_log_mocks()
+ if to_state in (constants.HYPERV_VM_STATE_ENABLED,
+ constants.HYPERV_VM_STATE_REBOOT):
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ action(self._instance_data)
+ self._mox.VerifyAll()
+
+ def test_pause(self):
+ self._test_vm_state_change(self._conn.pause, None,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_pause_already_paused(self):
+ self._test_vm_state_change(self._conn.pause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_unpause(self):
+ self._test_vm_state_change(self._conn.unpause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_unpause_already_running(self):
+ self._test_vm_state_change(self._conn.unpause, None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_suspend(self):
+ self._test_vm_state_change(self._conn.suspend, None,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_suspend_already_suspended(self):
+ self._test_vm_state_change(self._conn.suspend,
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_resume(self):
+ self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+ i, None),
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_resume_already_running(self):
+ self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+ i, None), None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_power_off(self):
+ self._test_vm_state_change(self._conn.power_off, None,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_already_powered_off(self):
+ self._test_vm_state_change(self._conn.power_off,
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def _test_power_on(self, block_device_info):
+ self._instance_data = self._get_instance_data()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ if block_device_info:
+ self._mox.StubOutWithMock(volumeops.VolumeOps,
+ 'fix_instance_volume_disk_paths')
+ volumeops.VolumeOps.fix_instance_volume_disk_paths(
+ mox.Func(self._check_instance_name), block_device_info)
+
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.power_on(self._context, self._instance_data, network_info,
+ block_device_info=block_device_info)
+ self._mox.VerifyAll()
+
+ def test_power_on_having_block_devices(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+ self._test_power_on(block_device_info=block_device_info)
+
+ def test_power_on_without_block_devices(self):
+ self._test_power_on(block_device_info=None)
+
+ def test_power_on_already_running(self):
+ self._instance_data = self._get_instance_data()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+ self._mox.ReplayAll()
+ self._conn.power_on(self._context, self._instance_data, network_info)
+ self._mox.VerifyAll()
+
+ def test_reboot(self):
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ self._setup_delete_vm_log_mocks()
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.reboot(self._context, self._instance_data, network_info,
+ None)
+ self._mox.VerifyAll()
+
    def _setup_destroy_mocks(self, destroy_disks=True):
        """Record the expected call sequence for destroying an instance.

        Expects: VM stopped, console logs deleted, storage paths listed,
        VM destroyed, each attached volume's iSCSI target logged out and,
        when destroy_disks is True, the instance directory removed.

        :param destroy_disks: whether the instance directory removal is
            expected as part of the destroy.
        """
        fake_volume_drives = ['fake_volume_drive']
        fake_target_iqn = 'fake_target_iqn'
        fake_target_lun = 'fake_target_lun'

        m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
        m.AndReturn(True)

        func = mox.Func(self._check_instance_name)
        vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)

        self._setup_delete_vm_log_mocks()

        # No local disks, one attached volume drive.
        m = vmutils.VMUtils.get_vm_storage_paths(func)
        m.AndReturn(([], fake_volume_drives))

        vmutils.VMUtils.destroy_vm(func)

        m = self._conn._volumeops.get_target_from_disk_path(
            fake_volume_drives[0])
        m.AndReturn((fake_target_iqn, fake_target_lun))

        self._mock_logout_storage_target(fake_target_iqn)

        if destroy_disks:
            # Looking up the instance dir with remove_dir=True deletes it
            # as a side effect.
            m = fake.PathUtils.get_instance_dir(mox.IsA(str),
                                                create_dir=False,
                                                remove_dir=True)
            m.AndReturn(self._test_instance_dir)
+
    def test_destroy(self):
        """destroy stops and deletes the VM, volumes and instance dir."""
        self._instance_data = self._get_instance_data()

        self._setup_destroy_mocks()

        self._mox.ReplayAll()
        self._conn.destroy(self._context, self._instance_data, None)
        self._mox.VerifyAll()
+
    def test_live_migration_unsupported_os(self):
        """Live migration raises NotImplementedError on pre-2012 hosts."""
        self._check_min_windows_version_satisfied = False
        self._conn = driver_hyperv.HyperVDriver(None)
        self._test_live_migration(unsupported_os=True)

    def test_live_migration_without_volumes(self):
        """Plain live migration: no volume targets to log out."""
        self._test_live_migration()

    def test_live_migration_with_volumes(self):
        """Volume targets are logged out on the source after migration."""
        self._test_live_migration(with_volumes=True)

    def test_live_migration_with_multiple_luns_per_target(self):
        """The target stays logged in while other LUNs still use it."""
        self._test_live_migration(with_volumes=True,
                                  other_luns_available=True)

    def test_live_migration_with_target_failure(self):
        """A migration failure triggers the recover callback and re-raise."""
        self._test_live_migration(test_failure=True)
+
    def _test_live_migration(self, test_failure=False,
                             with_volumes=False,
                             other_luns_available=False,
                             unsupported_os=False):
        """Exercise live_migration under the given scenario.

        :param test_failure: simulate a migration failure; the recover
            callback is expected and HyperVException is re-raised.
        :param with_volumes: the instance has iSCSI volumes whose targets
            must be logged out on the source after a successful migration.
        :param other_luns_available: other LUNs still use the target, so
            no logout is expected.
        :param unsupported_os: host OS does not support live migration;
            NotImplementedError is expected.
        """
        dest_server = 'fake_server'

        instance_data = self._get_instance_data()

        # The post callback fires only on success; the recover callback
        # only on failure.
        fake_post_method = self._mox.CreateMockAnything()
        if not test_failure and not unsupported_os:
            fake_post_method(self._context, instance_data, dest_server,
                             False)

        fake_recover_method = self._mox.CreateMockAnything()
        if test_failure:
            fake_recover_method(self._context, instance_data, dest_server,
                                False)

        if with_volumes:
            fake_target_iqn = 'fake_target_iqn'
            fake_target_lun_count = 1

        if not unsupported_os:
            # Console logs are copied from the source to the destination.
            m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
            m.AndReturn(('fake_local_vm_log_path', 'fake_vm_log_path.1'))

            m = fake.PathUtils.get_vm_console_log_paths(
                mox.IsA(str), remote_server=mox.IsA(str))
            m.AndReturn(('fake_remote_vm_log_path',
                         'fake_remote_vm_log_path.1'))

            # Only the first (existing) log file is copied.
            self._mox.StubOutWithMock(fake.PathUtils, 'exists')
            m = fake.PathUtils.exists(mox.IsA(str))
            m.AndReturn(True)
            m = fake.PathUtils.exists(mox.IsA(str))
            m.AndReturn(False)

            fake.PathUtils.copy(mox.IsA(str), mox.IsA(str))

            m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
                instance_data['name'], dest_server)
            if test_failure:
                m.AndRaise(vmutils.HyperVException('Simulated failure'))

            if with_volumes:
                # live_migrate_vm reports the per-target LUN counts so the
                # source can log out the now-unused targets.
                m.AndReturn({fake_target_iqn: fake_target_lun_count})

                self._mock_logout_storage_target(fake_target_iqn,
                                                 other_luns_available)
            else:
                m.AndReturn({})

        self._mox.ReplayAll()
        try:
            hyperv_exception_raised = False
            unsupported_os_exception_raised = False
            self._conn.live_migration(self._context, instance_data,
                                      dest_server, fake_post_method,
                                      fake_recover_method)
        except vmutils.HyperVException:
            hyperv_exception_raised = True
        except NotImplementedError:
            unsupported_os_exception_raised = True

        # 'not (a ^ b)' asserts a == b ('^' binds tighter than 'not'):
        # each exception is raised if and only if its scenario flag is set.
        self.assertTrue(not test_failure ^ hyperv_exception_raised)
        self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
        self._mox.VerifyAll()
+
    def test_pre_live_migration_cow_image(self):
        """COW images are fetched and cached on the destination."""
        self._test_pre_live_migration(True, False)

    def test_pre_live_migration_no_cow_image(self):
        """Non-COW images need no caching on the destination."""
        self._test_pre_live_migration(False, False)

    def test_pre_live_migration_with_volumes(self):
        """Volume targets are logged in on the destination beforehand."""
        self._test_pre_live_migration(False, True)
+
    def _test_pre_live_migration(self, cow, with_volumes):
        """Exercise pre_live_migration on the destination host.

        :param cow: use copy-on-write images; the base image is then
            expected to be fetched into the local cache.
        :param with_volumes: the instance has iSCSI volumes whose targets
            must be logged in on the destination.
        """
        self.flags(use_cow_images=cow)

        instance_data = self._get_instance_data()
        instance = db.instance_create(self._context, instance_data)
        instance['system_metadata'] = {}

        network_info = fake_network.fake_get_instance_nw_info(self.stubs)

        m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
        m.AndReturn(True)

        if cow:
            self._setup_get_cached_image_mocks(cow)

        if with_volumes:
            block_device_info = db_fakes.get_fake_block_device_info(
                self._volume_target_portal, self._volume_id)

            mapping = driver.block_device_info_get_mapping(block_device_info)
            data = mapping[0]['connection_info']['data']
            target_lun = data['target_lun']
            target_iqn = data['target_iqn']
            target_portal = data['target_portal']

            fake_mounted_disk = "fake_mounted_disk"
            fake_device_number = 0

            self._mock_login_storage_target(target_iqn, target_lun,
                                            target_portal,
                                            fake_mounted_disk,
                                            fake_device_number)
        else:
            block_device_info = None

        self._mox.ReplayAll()
        self._conn.pre_live_migration(self._context, instance,
                                      block_device_info, None, network_info)
        self._mox.VerifyAll()

        # The base image is fetched only for COW images.
        if cow:
            self.assertIsNotNone(self._fetched_image)
        else:
            self.assertIsNone(self._fetched_image)
+
    def test_get_instance_disk_info_is_implemented(self):
        """The driver must override get_instance_disk_info (even trivially)."""
        # Ensure that the method has been implemented in the driver
        try:
            disk_info = self._conn.get_instance_disk_info('fake_instance_name')
            self.assertIsNone(disk_info)
        except NotImplementedError:
            self.fail("test_get_instance_disk_info() should not raise "
                      "NotImplementedError")
+
    def test_snapshot_with_update_failure(self):
        """A failing image-service update propagates as HyperVException."""
        (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()

        # Make the fake image service raise on update().
        self._update_image_raise_exception = True

        self._mox.ReplayAll()
        self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
                          self._context, self._instance_data, snapshot_name,
                          func_call_matcher.call)
        self._mox.VerifyAll()

        # Assert states changed in correct order
        self.assertIsNone(func_call_matcher.match())
+
    def _setup_snapshot_mocks(self):
        """Record the expected call sequence for a VM snapshot.

        :returns: (snapshot_name, func_call_matcher) where the matcher
            verifies that the task-state update callback was invoked in
            the expected order (pending upload, then uploading).
        """
        expected_calls = [
            {'args': (),
             'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
                        'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
        ]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        snapshot_name = 'test_snapshot_' + str(uuid.uuid4())

        fake_hv_snapshot_path = 'fake_snapshot_path'
        fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'

        self._instance_data = self._get_instance_data()

        func = mox.Func(self._check_instance_name)
        m = vmutils.VMUtils.take_vm_snapshot(func)
        m.AndReturn(fake_hv_snapshot_path)

        m = fake.PathUtils.get_instance_dir(mox.IsA(str))
        m.AndReturn(self._test_instance_dir)

        m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
        m.AndReturn(fake_parent_vhd_path)

        # Capture the destination paths chosen for the copied VHDs so the
        # later merge/reconnect expectations can match on them.
        self._fake_dest_disk_path = None

        def copy_dest_disk_path(src, dest):
            self._fake_dest_disk_path = dest

        m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
        m.WithSideEffects(copy_dest_disk_path)

        self._fake_dest_base_disk_path = None

        def copy_dest_base_disk_path(src, dest):
            self._fake_dest_base_disk_path = dest

        m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
        m.WithSideEffects(copy_dest_base_disk_path)

        def check_dest_disk_path(path):
            return path == self._fake_dest_disk_path

        def check_dest_base_disk_path(path):
            return path == self._fake_dest_base_disk_path

        func1 = mox.Func(check_dest_disk_path)
        func2 = mox.Func(check_dest_base_disk_path)
        # Make sure that the hyper-v base and differential VHDs are merged
        vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
        vhdutils.VHDUtils.merge_vhd(func1, func2)

        def check_snapshot_path(snapshot_path):
            return snapshot_path == fake_hv_snapshot_path

        # Make sure that the Hyper-V snapshot is removed
        func = mox.Func(check_snapshot_path)
        vmutils.VMUtils.remove_vm_snapshot(func)

        # The temporary export directory is cleaned up.
        fake.PathUtils.rmtree(mox.IsA(str))

        # The merged VHD is opened for upload to the image service.
        m = fake.PathUtils.open(func2, 'rb')
        m.AndReturn(io.BytesIO(b'fake content'))

        return (snapshot_name, func_call_matcher)
+
    def test_snapshot(self):
        """snapshot uploads a merged VHD and sets task states in order."""
        (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()

        self._mox.ReplayAll()
        self._conn.snapshot(self._context, self._instance_data, snapshot_name,
                            func_call_matcher.call)
        self._mox.VerifyAll()

        # The image must be registered with the vhd disk format.
        self.assertTrue(self._image_metadata)
        self.assertIn("disk_format", self._image_metadata)
        self.assertEqual("vhd", self._image_metadata["disk_format"])

        # Assert states changed in correct order
        self.assertIsNone(func_call_matcher.match())
+
+ def _get_instance_data(self):
+ instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+ return db_fakes.get_fake_instance_data(instance_name,
+ self._project_id,
+ self._user_id)
+
    def _spawn_instance(self, cow, block_device_info=None,
                        ephemeral_storage=False):
        """Create a fake instance record and invoke the driver's spawn.

        :param cow: value for the use_cow_images flag.
        :param block_device_info: optional fake block device info dict.
        :param ephemeral_storage: give the instance 1 GB of ephemeral disk.
        """
        self.flags(use_cow_images=cow)

        self._instance_data = self._get_instance_data()
        instance = db.instance_create(self._context, self._instance_data)
        instance['system_metadata'] = {}

        if ephemeral_storage:
            instance['ephemeral_gb'] = 1

        image = db_fakes.get_fake_image_data(self._project_id, self._user_id)

        network_info = fake_network.fake_get_instance_nw_info(self.stubs)

        self._conn.spawn(self._context, instance, image,
                         injected_files=[], admin_password=None,
                         network_info=network_info,
                         block_device_info=block_device_info)
+
+ def _add_ide_disk(self, vm_name, path, ctrller_addr,
+ drive_addr, drive_type):
+ if drive_type == constants.IDE_DISK:
+ self._instance_ide_disks.append(path)
+ elif drive_type == constants.IDE_DVD:
+ self._instance_ide_dvds.append(path)
+
+ def _add_volume_disk(self, vm_name, controller_path, address,
+ mounted_disk_path):
+ self._instance_volume_disks.append(mounted_disk_path)
+
+ def _check_img_path(self, image_path):
+ return image_path == self._fetched_image
+
    def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
                                     boot_from_volume=False,
                                     block_device_info=None,
                                     admin_permissions=True,
                                     ephemeral_storage=False):
        """Record the expected calls for building the VM itself.

        Covers VM creation, root/ephemeral IDE disk attachment (unless
        booting from volume), SCSI controller creation, volume attachment
        when booting from volume, NIC creation, optional metrics
        collection and the serial port connection setup.
        """
        vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
                                  mox.IsA(int), mox.IsA(bool),
                                  CONF.hyperv.dynamic_memory_ratio,
                                  mox.IsA(list))

        if not boot_from_volume:
            # Root disk is attached as an IDE drive.
            m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
                                                 mox.IsA(str),
                                                 mox.IsA(int),
                                                 mox.IsA(int),
                                                 mox.IsA(str))
            m.WithSideEffects(self._add_ide_disk).InAnyOrder()

        if ephemeral_storage:
            # The ephemeral disk is attached as a second IDE drive.
            m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
                                                 mox.IsA(str),
                                                 mox.IsA(int),
                                                 mox.IsA(int),
                                                 mox.IsA(str))
            m.WithSideEffects(self._add_ide_disk).InAnyOrder()

        func = mox.Func(self._check_vm_name)
        m = vmutils.VMUtils.create_scsi_controller(func)
        m.InAnyOrder()

        if boot_from_volume:
            mapping = driver.block_device_info_get_mapping(block_device_info)
            data = mapping[0]['connection_info']['data']
            target_lun = data['target_lun']
            target_iqn = data['target_iqn']
            target_portal = data['target_portal']

            self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
                                     target_lun, target_portal, True)

        vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name),
                                   mox.IsA(str), mox.IsA(unicode)).InAnyOrder()

        if setup_vif_mocks_func:
            setup_vif_mocks_func()

        if CONF.hyperv.enable_instance_metrics_collection:
            vmutils.VMUtils.enable_vm_metrics_collection(
                mox.Func(self._check_vm_name))

        vmutils.VMUtils.get_vm_serial_port_connection(
            mox.IsA(str), update_connection=mox.IsA(str))
+
    def _set_vm_name(self, vm_name):
        # Side-effect hook: remember the name the driver picked for the VM.
        self._test_vm_name = vm_name

    def _check_vm_name(self, vm_name):
        # mox comparator: matches only the VM name recorded above.
        return vm_name == self._test_vm_name
+
    def _setup_check_admin_permissions_mocks(self, admin_permissions=True):
        """Stub the admin permission check, optionally simulating denial.

        :param admin_permissions: when False, the check is expected to
            raise HyperVAuthorizationException.
        """
        self._mox.StubOutWithMock(vmutils.VMUtils,
                                  'check_admin_permissions')
        m = vmutils.VMUtils.check_admin_permissions()
        if admin_permissions:
            m.AndReturn(None)
        else:
            m.AndRaise(vmutils.HyperVAuthorizationException(_(
                'Simulated failure')))
+
    def _setup_log_vm_output_mocks(self):
        # Starting a VM spawns an IOThread that streams the serial console
        # output to the local log file.
        m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
        m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
        ioutils.IOThread('fake_pipe', 'fake_vm_log_path',
                         units.Mi).start()

    def _setup_delete_vm_log_mocks(self):
        # Both the current and the rotated console log files are removed.
        m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
        m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
        fileutils.delete_if_exists(mox.IsA(str))
        fileutils.delete_if_exists(mox.IsA(str))
+
    def _setup_get_cached_image_mocks(self, cow=True,
                                      vhd_format=constants.DISK_FORMAT_VHD):
        """Record the expectations for fetching and caching a base image.

        The fetched image is renamed with its detected format extension;
        for non-COW VHD images it is also copied and resized to the
        instance's root disk size.
        """
        m = vhdutils.VHDUtils.get_vhd_format(
            mox.Func(self._check_img_path))
        m.AndReturn(vhd_format)

        def check_img_path_with_ext(image_path):
            return image_path == self._fetched_image + '.' + vhd_format.lower()

        fake.PathUtils.rename(mox.Func(self._check_img_path),
                              mox.Func(check_img_path_with_ext))

        if cow and vhd_format == constants.DISK_FORMAT_VHD:
            m = vhdutils.VHDUtils.get_vhd_info(
                mox.Func(check_img_path_with_ext))
            m.AndReturn({'MaxInternalSize': 1024})

            fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))

            m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
                mox.IsA(str), mox.IsA(object))
            m.AndReturn(1025)

            vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
                                         is_file_max_size=False)
+
    def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
                                    with_exception=False,
                                    block_device_info=None,
                                    boot_from_volume=False,
                                    config_drive=False,
                                    use_cdrom=False,
                                    admin_permissions=True,
                                    vhd_format=constants.DISK_FORMAT_VHD,
                                    ephemeral_storage=False):
        """Record the full expected call sequence for a spawn operation.

        Covers root disk preparation (image cache, differencing or resized
        copy), permission checks, ephemeral disk creation, VM creation
        (via _setup_create_instance_mocks), optional config drive and the
        final power-on — or the destroy path when with_exception is set.
        """
        m = vmutils.VMUtils.vm_exists(mox.IsA(str))
        m.WithSideEffects(self._set_vm_name).AndReturn(False)

        # Any stale instance directory is removed first.
        m = fake.PathUtils.get_instance_dir(mox.IsA(str),
                                            create_dir=False,
                                            remove_dir=True)
        m.AndReturn(self._test_instance_dir)

        if block_device_info:
            m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
                'fake_root_device_name', block_device_info)
            m.AndReturn(boot_from_volume)

        if not boot_from_volume:
            m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
            m.AndReturn(self._test_instance_dir)

            self._setup_get_cached_image_mocks(cow, vhd_format)
            m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
            m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
                         'Type': 2})

            if cow:
                # COW: a differencing disk is created on top of the cached
                # base image (VHDX variant also needs an explicit size).
                m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
                m.AndReturn(vhd_format)
                if vhd_format == constants.DISK_FORMAT_VHD:
                    vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
                                                              mox.IsA(str))
                else:
                    m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
                        mox.IsA(str), mox.IsA(object))
                    m.AndReturn(1025)
                    vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
                                                              mox.IsA(str),
                                                              mox.IsA(int))
            else:
                # Non-COW: the base image is copied and resized in place.
                fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
                m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
                    mox.IsA(str), mox.IsA(object))
                m.AndReturn(1025)
                vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
                                             is_file_max_size=False)

        self._setup_check_admin_permissions_mocks(
            admin_permissions=admin_permissions)
        if ephemeral_storage:
            m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
            m.AndReturn(self._test_instance_dir)
            vhdutils.VHDUtils.create_dynamic_vhd(mox.IsA(str), mox.IsA(int),
                                                 mox.IsA(str))

        self._setup_create_instance_mocks(setup_vif_mocks_func,
                                          boot_from_volume,
                                          block_device_info,
                                          ephemeral_storage=ephemeral_storage)

        if config_drive and not with_exception:
            self._setup_spawn_config_drive_mocks(use_cdrom)

        # TODO(alexpilotti) Based on where the exception is thrown
        # some of the above mock calls need to be skipped
        if with_exception:
            self._setup_destroy_mocks()
        else:
            vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
                                         constants.HYPERV_VM_STATE_ENABLED)
            self._setup_log_vm_output_mocks()
+
    def _test_spawn_instance(self, cow=True,
                             expected_ide_disks=1,
                             expected_ide_dvds=0,
                             setup_vif_mocks_func=None,
                             with_exception=False,
                             config_drive=False,
                             use_cdrom=False,
                             admin_permissions=True,
                             vhd_format=constants.DISK_FORMAT_VHD,
                             ephemeral_storage=False):
        """Spawn an instance and verify the attached IDE disks/DVDs.

        :param expected_ide_disks: number of IDE disks expected to be
            attached (root + optional ephemeral/config drive).
        :param expected_ide_dvds: number of IDE DVD drives expected.
        """
        self._setup_spawn_instance_mocks(cow,
                                         setup_vif_mocks_func,
                                         with_exception,
                                         config_drive=config_drive,
                                         use_cdrom=use_cdrom,
                                         admin_permissions=admin_permissions,
                                         vhd_format=vhd_format,
                                         ephemeral_storage=ephemeral_storage)

        self._mox.ReplayAll()
        self._spawn_instance(cow, ephemeral_storage=ephemeral_storage)
        self._mox.VerifyAll()

        self.assertEqual(len(self._instance_ide_disks), expected_ide_disks)
        self.assertEqual(len(self._instance_ide_dvds), expected_ide_dvds)

        # The first attached IDE disk must be the root disk.
        vhd_path = os.path.join(self._test_instance_dir, 'root.' +
                                vhd_format.lower())
        self.assertEqual(vhd_path, self._instance_ide_disks[0])
+
    def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
                                        fake_mounted_disk,
                                        fake_device_number):
        """Expect the iSCSI LUN to be resolved to a mounted disk path."""
        m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
                                                                 target_lun)
        m.AndReturn(fake_device_number)

        m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
            fake_device_number)
        m.AndReturn(fake_mounted_disk)

    def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
                                   fake_mounted_disk, fake_device_number):
        """Expect an iSCSI target login followed by LUN resolution."""
        m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
                                                                 target_lun)
        m.AndReturn(fake_device_number)

        volumeutils.VolumeUtils.login_storage_target(target_lun,
                                                     target_iqn,
                                                     target_portal)

        self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
                                             fake_mounted_disk,
                                             fake_device_number)
+
    def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
                            target_portal=None, boot_from_volume=False):
        """Record the expectations for attaching an iSCSI volume.

        Boot volumes go on IDE controller 0 slot 0; data volumes go on
        the SCSI controller in the first free slot.
        """
        fake_mounted_disk = "fake_mounted_disk"
        fake_device_number = 0
        fake_controller_path = 'fake_scsi_controller_path'
        self._mox.StubOutWithMock(self._conn._volumeops,
                                  '_get_free_controller_slot')

        self._mock_login_storage_target(target_iqn, target_lun,
                                        target_portal,
                                        fake_mounted_disk,
                                        fake_device_number)

        self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
                                             fake_mounted_disk,
                                             fake_device_number)

        if boot_from_volume:
            m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
            m.AndReturn(fake_controller_path)
            fake_free_slot = 0
        else:
            m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
            m.AndReturn(fake_controller_path)

            fake_free_slot = 1
            m = self._conn._volumeops._get_free_controller_slot(
                fake_controller_path)
            m.AndReturn(fake_free_slot)

        m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
                                                        fake_controller_path,
                                                        fake_free_slot,
                                                        fake_mounted_disk)
        m.WithSideEffects(self._add_volume_disk)
+
+ def _test_util_class_version(self, v1_class, v2_class,
+ get_instance_action, is_hyperv_2012,
+ force_v1_flag, force_utils_v1):
+ self._check_min_windows_version_satisfied = is_hyperv_2012
+ CONF.set_override(force_v1_flag, force_v1_flag, 'hyperv')
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ instance = get_instance_action()
+ is_v1 = isinstance(instance, v1_class)
+ # v2_class can inherit from v1_class
+ is_v2 = isinstance(instance, v2_class)
+
+ self.assertTrue((is_hyperv_2012 and not force_v1_flag) ^
+ (is_v1 and not is_v2))
+
    # The following tests verify, per utils family, that the factory picks
    # the V2 class on Hyper-V 2012, honors the force-V1 config option, and
    # falls back to V1 on Hyper-V 2008 R2.

    def test_volumeutils_version_hyperv_2012(self):
        self._test_util_class_version(volumeutils.VolumeUtils,
                                      volumeutilsv2.VolumeUtilsV2,
                                      lambda: utilsfactory.get_volumeutils(),
                                      True, 'force_volumeutils_v1', False)

    def test_volumeutils_version_hyperv_2012_force_v1(self):
        self._test_util_class_version(volumeutils.VolumeUtils,
                                      volumeutilsv2.VolumeUtilsV2,
                                      lambda: utilsfactory.get_volumeutils(),
                                      True, 'force_volumeutils_v1', True)

    def test_volumeutils_version_hyperv_2008R2(self):
        self._test_util_class_version(volumeutils.VolumeUtils,
                                      volumeutilsv2.VolumeUtilsV2,
                                      lambda: utilsfactory.get_volumeutils(),
                                      False, 'force_volumeutils_v1', False)

    def test_vmutils_version_hyperv_2012(self):
        self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
                                      lambda: utilsfactory.get_vmutils(),
                                      True, 'force_hyperv_utils_v1', False)

    def test_vmutils_version_hyperv_2012_force_v1(self):
        self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
                                      lambda: utilsfactory.get_vmutils(),
                                      True, 'force_hyperv_utils_v1', True)

    def test_vmutils_version_hyperv_2008R2(self):
        self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
                                      lambda: utilsfactory.get_vmutils(),
                                      False, 'force_hyperv_utils_v1', False)

    def test_vhdutils_version_hyperv_2012(self):
        self._test_util_class_version(vhdutils.VHDUtils,
                                      vhdutilsv2.VHDUtilsV2,
                                      lambda: utilsfactory.get_vhdutils(),
                                      True, 'force_hyperv_utils_v1', False)

    def test_vhdutils_version_hyperv_2012_force_v1(self):
        self._test_util_class_version(vhdutils.VHDUtils,
                                      vhdutilsv2.VHDUtilsV2,
                                      lambda: utilsfactory.get_vhdutils(),
                                      True, 'force_hyperv_utils_v1', True)

    def test_vhdutils_version_hyperv_2008R2(self):
        self._test_util_class_version(vhdutils.VHDUtils,
                                      vhdutilsv2.VHDUtilsV2,
                                      lambda: utilsfactory.get_vhdutils(),
                                      False, 'force_hyperv_utils_v1', False)

    def test_networkutils_version_hyperv_2012(self):
        self._test_util_class_version(networkutils.NetworkUtils,
                                      networkutilsv2.NetworkUtilsV2,
                                      lambda: utilsfactory.get_networkutils(),
                                      True, 'force_hyperv_utils_v1', False)

    def test_networkutils_version_hyperv_2012_force_v1(self):
        self._test_util_class_version(networkutils.NetworkUtils,
                                      networkutilsv2.NetworkUtilsV2,
                                      lambda: utilsfactory.get_networkutils(),
                                      True, 'force_hyperv_utils_v1', True)

    def test_networkutils_version_hyperv_2008R2(self):
        self._test_util_class_version(networkutils.NetworkUtils,
                                      networkutilsv2.NetworkUtilsV2,
                                      lambda: utilsfactory.get_networkutils(),
                                      False, 'force_hyperv_utils_v1', False)
+
    def test_attach_volume(self):
        """attach_volume logs in the target and attaches the disk."""
        instance_data = self._get_instance_data()

        connection_info = db_fakes.get_fake_volume_info_data(
            self._volume_target_portal, self._volume_id)
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']
        mount_point = '/dev/sdc'

        self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
                                 target_portal)

        self._mox.ReplayAll()
        self._conn.attach_volume(None, connection_info, instance_data,
                                 mount_point)
        self._mox.VerifyAll()

        # Exactly one volume disk must have been attached.
        self.assertEqual(len(self._instance_volume_disks), 1)
+
    def _mock_get_mounted_disk_from_lun_error(self, target_iqn, target_lun,
                                              fake_mounted_disk,
                                              fake_device_number):
        # Simulate a failure while resolving the LUN to a disk path.
        m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
                                                                 target_lun)
        m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
    def _mock_attach_volume_target_logout(self, instance_name, target_iqn,
                                          target_lun, target_portal=None,
                                          boot_from_volume=False):
        """Expect a failed attach to log the iSCSI target back out."""
        fake_mounted_disk = "fake_mounted disk"
        fake_device_number = 0

        self._mock_login_storage_target(target_iqn, target_lun,
                                        target_portal,
                                        fake_mounted_disk,
                                        fake_device_number)

        # The LUN resolution fails after a successful login...
        self._mock_get_mounted_disk_from_lun_error(target_iqn, target_lun,
                                                   fake_mounted_disk,
                                                   fake_device_number)

        # ...so the target is expected to be logged out again.
        self._mock_logout_storage_target(target_iqn)
+
    def test_attach_volume_logout(self):
        """A failed attach logs the target out and re-raises."""
        instance_data = self._get_instance_data()

        connection_info = db_fakes.get_fake_volume_info_data(
            self._volume_target_portal, self._volume_id)
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']
        mount_point = '/dev/sdc'

        self._mock_attach_volume_target_logout(instance_data['name'],
                                               target_iqn, target_lun,
                                               target_portal)

        self._mox.ReplayAll()
        self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
                          None, connection_info, instance_data, mount_point)
        self._mox.VerifyAll()
+
    def test_attach_volume_connection_error(self):
        """A login failure during attach propagates as HyperVException."""
        instance_data = self._get_instance_data()

        connection_info = db_fakes.get_fake_volume_info_data(
            self._volume_target_portal, self._volume_id)
        mount_point = '/dev/sdc'

        def fake_login_storage_target(connection_info):
            raise vmutils.HyperVException('Fake connection exception')

        # Plain stub (no mox recording) — the exception should surface
        # before any other volume operation takes place.
        self.stubs.Set(self._conn._volumeops, '_login_storage_target',
                       fake_login_storage_target)
        self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
                          None, connection_info, instance_data, mount_point)
+
    def _mock_detach_volume(self, target_iqn, target_lun,
                            other_luns_available=False):
        """Record the expectations for detaching an iSCSI volume."""
        fake_mounted_disk = "fake_mounted_disk"
        fake_device_number = 0
        m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
                                                                 target_lun)
        m.AndReturn(fake_device_number)

        m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
            fake_device_number)
        m.AndReturn(fake_mounted_disk)

        vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)

        # The target is logged out only when no other LUNs remain on it.
        self._mock_logout_storage_target(target_iqn, other_luns_available)
+
    def _mock_logout_storage_target(self, target_iqn,
                                    other_luns_available=False):
        """Expect a target logout unless other LUNs still use the target.

        :param other_luns_available: when True, the reported LUN count is
            greater than one and no logout call is expected.
        """
        m = volumeutils.VolumeUtils.get_target_lun_count(target_iqn)
        m.AndReturn(1 + int(other_luns_available))

        if not other_luns_available:
            volumeutils.VolumeUtils.logout_storage_target(target_iqn)
+
    def _test_detach_volume(self, other_luns_available=False):
        """Exercise detach_volume, optionally with other LUNs in use."""
        instance_data = self._get_instance_data()
        self.assertIn('name', instance_data)

        connection_info = db_fakes.get_fake_volume_info_data(
            self._volume_target_portal, self._volume_id)
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        self.assertIn('target_portal', data)

        mount_point = '/dev/sdc'

        self._mock_detach_volume(target_iqn, target_lun, other_luns_available)
        self._mox.ReplayAll()
        self._conn.detach_volume(connection_info, instance_data, mount_point)
        self._mox.VerifyAll()

    def test_detach_volume(self):
        """Detaching the last LUN also logs out the iSCSI target."""
        self._test_detach_volume()

    def test_detach_volume_multiple_luns_per_target(self):
        # The iSCSI target should not be disconnected in this case.
        self._test_detach_volume(other_luns_available=True)
+
    def test_boot_from_volume(self):
        """Spawning with a root volume attaches it instead of a local disk."""
        block_device_info = db_fakes.get_fake_block_device_info(
            self._volume_target_portal, self._volume_id)

        self._setup_spawn_instance_mocks(cow=False,
                                         block_device_info=block_device_info,
                                         boot_from_volume=True)

        self._mox.ReplayAll()
        self._spawn_instance(False, block_device_info)
        self._mox.VerifyAll()

        # The boot volume is the single attached volume disk.
        self.assertEqual(len(self._instance_volume_disks), 1)
+
    def test_get_volume_connector(self):
        """The connector reports the host's ip, name and iSCSI initiator."""
        self._instance_data = self._get_instance_data()
        instance = db.instance_create(self._context, self._instance_data)

        fake_my_ip = "fake_ip"
        fake_host = "fake_host"
        fake_initiator = "fake_initiator"

        self.flags(my_ip=fake_my_ip)
        self.flags(host=fake_host)

        m = volumeutils.VolumeUtils.get_iscsi_initiator()
        m.AndReturn(fake_initiator)

        self._mox.ReplayAll()
        data = self._conn.get_volume_connector(instance)
        self._mox.VerifyAll()

        self.assertEqual(fake_my_ip, data.get('ip'))
        self.assertEqual(fake_host, data.get('host'))
        self.assertEqual(fake_initiator, data.get('initiator'))
+
    def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
                                                     copy_exception=False,
                                                     size_exception=False):
        """Record the expectations for migrate_disk_and_power_off.

        :param same_host: migration target is the local host (resize);
            disks are renamed locally instead of being copied away.
        :param copy_exception: simulate a shutil copy failure; a rollback
            of the instance dir is then expected.
        :param size_exception: pick a flavor with a root disk smaller than
            the instance's; the operation should fail before any mock call.
        :returns: (instance, fake_dest_ip, network_info, flavor)
        """
        self._instance_data = self._get_instance_data()
        instance = db.instance_create(self._context, self._instance_data)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)

        instance['root_gb'] = 10

        fake_local_ip = '10.0.0.1'
        if same_host:
            fake_dest_ip = fake_local_ip
        else:
            fake_dest_ip = '10.0.0.2'

        # m1.tiny has a root disk smaller than 10 GB, triggering the
        # cannot-shrink failure path.
        if size_exception:
            flavor = 'm1.tiny'
        else:
            flavor = 'm1.small'

        flavor = db.flavor_get_by_name(self._context, flavor)

        if not size_exception:
            fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
            fake_revert_path = os.path.join(self._test_instance_dir, '_revert')

            func = mox.Func(self._check_instance_name)
            vmutils.VMUtils.set_vm_state(func,
                                         constants.HYPERV_VM_STATE_DISABLED)

            self._setup_delete_vm_log_mocks()

            m = vmutils.VMUtils.get_vm_storage_paths(func)
            m.AndReturn(([fake_root_vhd_path], []))

            m = hostutils.HostUtils.get_local_ips()
            m.AndReturn([fake_local_ip])

            m = fake.PathUtils.get_instance_dir(mox.IsA(str))
            m.AndReturn(self._test_instance_dir)

            m = pathutils.PathUtils.get_instance_migr_revert_dir(
                instance['name'], remove_dir=True)
            m.AndReturn(fake_revert_path)

            if same_host:
                fake.PathUtils.makedirs(mox.IsA(str))

            m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
            if copy_exception:
                m.AndRaise(shutil.Error('Simulated copy error'))
                # The half-migrated instance dir is rolled back (removed).
                m = fake.PathUtils.get_instance_dir(mox.IsA(str),
                                                    mox.IsA(str),
                                                    remove_dir=True)
                m.AndReturn(self._test_instance_dir)
            else:
                fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
                destroy_disks = True
                if same_host:
                    # Same-host resize keeps the disks: a second rename
                    # moves them into the revert directory.
                    fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
                    destroy_disks = False

                self._setup_destroy_mocks(False)

                if destroy_disks:
                    m = fake.PathUtils.get_instance_dir(mox.IsA(str),
                                                        mox.IsA(str),
                                                        remove_dir=True)
                    m.AndReturn(self._test_instance_dir)

        return (instance, fake_dest_ip, network_info, flavor)
+
    def test_migrate_disk_and_power_off(self):
        """Disks are copied to the remote host and the VM is destroyed."""
        (instance,
         fake_dest_ip,
         network_info,
         flavor) = self._setup_test_migrate_disk_and_power_off_mocks()

        self._mox.ReplayAll()
        self._conn.migrate_disk_and_power_off(self._context, instance,
                                              fake_dest_ip, flavor,
                                              network_info)
        self._mox.VerifyAll()

    def test_migrate_disk_and_power_off_same_host(self):
        """Same-host resize renames the disks instead of destroying them."""
        args = self._setup_test_migrate_disk_and_power_off_mocks(
            same_host=True)
        (instance, fake_dest_ip, network_info, flavor) = args

        self._mox.ReplayAll()
        self._conn.migrate_disk_and_power_off(self._context, instance,
                                              fake_dest_ip, flavor,
                                              network_info)
        self._mox.VerifyAll()

    def test_migrate_disk_and_power_off_copy_exception(self):
        """A disk copy failure rolls back and re-raises shutil.Error."""
        args = self._setup_test_migrate_disk_and_power_off_mocks(
            copy_exception=True)
        (instance, fake_dest_ip, network_info, flavor) = args

        self._mox.ReplayAll()
        self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
                          self._context, instance, fake_dest_ip,
                          flavor, network_info)
        self._mox.VerifyAll()

    def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
        """Shrinking the root disk is rejected with InstanceFaultRollback."""
        args = self._setup_test_migrate_disk_and_power_off_mocks(
            size_exception=True)
        (instance, fake_dest_ip, network_info, flavor) = args

        self._mox.ReplayAll()
        self.assertRaises(exception.InstanceFaultRollback,
                          self._conn.migrate_disk_and_power_off,
                          self._context, instance, fake_dest_ip,
                          flavor, network_info)
        self._mox.VerifyAll()
+
    def _mock_attach_config_drive(self, instance, config_drive_format):
        """Expect a config drive lookup and its attachment as an IDE drive.

        :param config_drive_format: file extension of the config drive;
            an unknown format makes the lookup return None.
        """
        instance['config_drive'] = True
        self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
        m = fake.PathUtils.lookup_configdrive_path(
            mox.Func(self._check_instance_name))

        if config_drive_format in constants.DISK_FORMAT_MAP:
            m.AndReturn(self._test_instance_dir + '/configdrive.' +
                        config_drive_format)
        else:
            m.AndReturn(None)

        m = vmutils.VMUtils.attach_ide_drive(
            mox.Func(self._check_instance_name),
            mox.IsA(str),
            mox.IsA(int),
            mox.IsA(int),
            mox.IsA(str))
        m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
    def _verify_attach_config_drive(self, config_drive_format):
        # A vhd config drive is attached as the second IDE disk; an iso
        # one is attached as the first IDE DVD drive.
        if config_drive_format == constants.IDE_DISK_FORMAT.lower():
            self.assertEqual(self._instance_ide_disks[1],
                             self._test_instance_dir + '/configdrive.' +
                             config_drive_format)
        elif config_drive_format == constants.IDE_DVD_FORMAT.lower():
            self.assertEqual(self._instance_ide_dvds[0],
                             self._test_instance_dir + '/configdrive.' +
                             config_drive_format)
+
    def _test_finish_migration(self, power_on, ephemeral_storage=False,
                               config_drive=False,
                               config_drive_format='iso'):
        """Exercise finish_migration on the destination host.

        :param power_on: whether the VM is expected to be started.
        :param ephemeral_storage: instance has an ephemeral disk.
        :param config_drive: attach a config drive after migration.
        :param config_drive_format: 'iso' or 'vhd'.
        """
        self._instance_data = self._get_instance_data()
        instance = db.instance_create(self._context, self._instance_data)
        instance['system_metadata'] = {}
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)

        m = fake.PathUtils.get_instance_dir(mox.IsA(str))
        m.AndReturn(self._test_instance_dir)

        self._mox.StubOutWithMock(fake.PathUtils, 'exists')
        m = fake.PathUtils.exists(mox.IsA(str))
        m.AndReturn(True)

        fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
                                instance["image_ref"]))

        # The migrated VHD is reconnected to its locally cached parent
        # and checked against the flavor's root disk size.
        m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
        m.AndReturn({'ParentPath': fake_parent_vhd_path,
                     'MaxInternalSize': 1})
        m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
            mox.IsA(str), mox.IsA(object))
        m.AndReturn(1025)

        vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))

        m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
        m.AndReturn({'MaxInternalSize': 1024})

        m = fake.PathUtils.exists(mox.IsA(str))
        m.AndReturn(True)

        m = fake.PathUtils.get_instance_dir(mox.IsA(str))
        if ephemeral_storage:
            # NOTE(review): this early 'return' exits before ReplayAll and
            # before finish_migration is ever called, which makes the
            # ephemeral-storage test vacuous — it only records, never
            # verifies. Looks unintentional; confirm whether the branch
            # should fall through with the extra ephemeral mocks instead.
            return m.AndReturn(self._test_instance_dir)
        else:
            m.AndReturn(None)

        self._set_vm_name(instance['name'])
        self._setup_create_instance_mocks(None, False,
                                          ephemeral_storage=ephemeral_storage)

        if power_on:
            vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
                                         constants.HYPERV_VM_STATE_ENABLED)
            self._setup_log_vm_output_mocks()

        if config_drive:
            self._mock_attach_config_drive(instance, config_drive_format)

        self._mox.ReplayAll()
        self._conn.finish_migration(self._context, None, instance, "",
                                    network_info, None, False, None, power_on)
        self._mox.VerifyAll()

        if config_drive:
            self._verify_attach_config_drive(config_drive_format)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(False)
+
+ def test_finish_migration_with_ephemeral_storage(self):
+ self._test_finish_migration(False, ephemeral_storage=True)
+
+ def test_finish_migration_attach_config_drive_iso(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_migration_attach_config_drive_vhd(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
+ def test_confirm_migration(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
+ remove_dir=True)
+ self._mox.ReplayAll()
+ self._conn.confirm_migration(None, instance, network_info)
+ self._mox.VerifyAll()
+
+ def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
+ instance['name'])
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
+ m.AndReturn(fake_revert_path)
+ fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ if ephemeral_storage:
+ m.AndReturn(self._test_instance_dir)
+ else:
+ m.AndReturn(None)
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False,
+ ephemeral_storage=ephemeral_storage)
+
+ if power_on:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
+ self._mox.ReplayAll()
+ self._conn.finish_revert_migration(self._context, instance,
+ network_info, None,
+ power_on)
+ self._mox.VerifyAll()
+
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(False)
+
+ def test_spawn_no_admin_permissions(self):
+ self.assertRaises(vmutils.HyperVAuthorizationException,
+ self._test_spawn_instance,
+ with_exception=True,
+ admin_permissions=False)
+
+ def test_finish_revert_migration_with_ephemeral_storage(self):
+ self._test_finish_revert_migration(False, ephemeral_storage=True)
+
+ def test_finish_revert_migration_attach_config_drive_iso(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_revert_migration_attach_config_drive_vhd(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.plug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.unplug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_rollback_live_migration_at_destination(self):
+ with mock.patch.object(self._conn, "destroy") as mock_destroy:
+ self._conn.rollback_live_migration_at_destination(self._context,
+ self._test_spawn_instance, [], None)
+ mock_destroy.assert_called_once_with(self._context,
+ self._test_spawn_instance, [], None)
+
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self._conn.refresh_instance_security_rules,
+ instance=None)
+
+ def test_get_rdp_console(self):
+ self.flags(my_ip="192.168.1.1")
+
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+
+ fake_port = 9999
+ fake_vm_id = "fake_vm_id"
+
+ m = rdpconsoleutils.RDPConsoleUtils.get_rdp_console_port()
+ m.AndReturn(fake_port)
+
+ m = vmutils.VMUtils.get_vm_id(mox.IsA(str))
+ m.AndReturn(fake_vm_id)
+
+ self._mox.ReplayAll()
+ connect_info = self._conn.get_rdp_console(self._context, instance)
+ self._mox.VerifyAll()
+
+ self.assertEqual(CONF.my_ip, connect_info.host)
+ self.assertEqual(fake_port, connect_info.port)
+ self.assertEqual(fake_vm_id, connect_info.internal_access_path)
+
+
+class VolumeOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for VolumeOps class."""
+
+ def setUp(self):
+ super(VolumeOpsTestCase, self).setUp()
+ self.volumeops = volumeops.VolumeOps()
+
+ def test_get_mounted_disk_from_lun(self):
+ with contextlib.nested(
+ mock.patch.object(self.volumeops._volutils,
+ 'get_device_number_for_target'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'get_mounted_disk_by_drive_number')
+ ) as (mock_get_device_number_for_target,
+ mock_get_mounted_disk_by_drive_number):
+
+ mock_get_device_number_for_target.return_value = 0
+ mock_get_mounted_disk_by_drive_number.return_value = 'disk_path'
+
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
+ disk = self.volumeops._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
+ self.assertEqual(disk, 'disk_path')
+
+ def test_get_mounted_disk_from_lun_failure(self):
+ self.flags(mounted_disk_query_retry_count=1, group='hyperv')
+
+ with mock.patch.object(self.volumeops._volutils,
+ 'get_device_number_for_target') as m_device_num:
+ m_device_num.side_effect = [None, -1]
+
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
+ for attempt in xrange(1):
+ self.assertRaises(exception.NotFound,
+ self.volumeops._get_mounted_disk_from_lun,
+ target_iqn, target_lun)
+
+ def test_get_free_controller_slot_exception(self):
+ fake_drive = mock.MagicMock()
+ type(fake_drive).AddressOnParent = mock.PropertyMock(
+ side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
+ fake_scsi_controller_path = 'fake_scsi_controller_path'
+
+ with mock.patch.object(self.volumeops._vmutils,
+ 'get_attached_disks') as fake_get_attached_disks:
+ fake_get_attached_disks.return_value = (
+ [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
+ self.assertRaises(vmutils.HyperVException,
+ self.volumeops._get_free_controller_slot,
+ fake_scsi_controller_path)
+
+ def test_fix_instance_volume_disk_paths(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ with contextlib.nested(
+ mock.patch.object(self.volumeops,
+ '_get_mounted_disk_from_lun'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'get_vm_scsi_controller'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'set_disk_host_resource'),
+ mock.patch.object(self.volumeops,
+ 'ebs_root_in_block_devices')
+ ) as (mock_get_mounted_disk_from_lun,
+ mock_get_vm_scsi_controller,
+ mock_set_disk_host_resource,
+ mock_ebs_in_block_devices):
+
+ mock_ebs_in_block_devices.return_value = False
+ mock_get_mounted_disk_from_lun.return_value = "fake_mounted_path"
+ mock_set_disk_host_resource.return_value = "fake_controller_path"
+
+ self.volumeops.fix_instance_volume_disk_paths(
+ "test_vm_name",
+ block_device_info)
+
+ mock_get_mounted_disk_from_lun.assert_called_with(
+ 'iqn.2010-10.org.openstack:volume-' + self._volume_id, 1, True)
+ mock_get_vm_scsi_controller.assert_called_with("test_vm_name")
+ mock_set_disk_host_resource("test_vm_name", "fake_controller_path",
+ 0, "fake_mounted_path")
+
+
+class HostOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for the Hyper-V hostops class."""
+
+ def setUp(self):
+ self._hostops = hostops.HostOps()
+ self._hostops._hostutils = mock.MagicMock()
+ self._hostops.time = mock.MagicMock()
+ super(HostOpsTestCase, self).setUp()
+
+ @mock.patch('nova.virt.hyperv.hostops.time')
+ def test_host_uptime(self, mock_time):
+ self._hostops._hostutils.get_host_tick_count64.return_value = 100
+ mock_time.strftime.return_value = "01:01:01"
+
+ result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % (
+ str(datetime.timedelta(
+ milliseconds = long(100))))
+ actual_uptime = self._hostops.get_host_uptime()
+ self.assertEqual(result_uptime, actual_uptime)
diff --git a/nova/tests/unit/virt/hyperv/test_ioutils.py b/nova/tests/unit/virt/hyperv/test_ioutils.py
new file mode 100644
index 0000000000..2f12450a46
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_ioutils.py
@@ -0,0 +1,61 @@
+# Copyright 2014 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+import os
+
+from nova import test
+from nova.virt.hyperv import ioutils
+
+
+class IOThreadTestCase(test.NoDBTestCase):
+ _FAKE_SRC = r'fake_source_file'
+ _FAKE_DEST = r'fake_dest_file'
+ _FAKE_MAX_BYTES = 1
+
+ def setUp(self):
+ self._iothread = ioutils.IOThread(
+ self._FAKE_SRC, self._FAKE_DEST, self._FAKE_MAX_BYTES)
+ super(IOThreadTestCase, self).setUp()
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('os.rename')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.remove')
+ def test_copy(self, fake_remove, fake_exists, fake_rename, fake_open):
+ fake_data = 'a'
+ fake_src = mock.Mock()
+ fake_dest = mock.Mock()
+
+ fake_src.read.return_value = fake_data
+ fake_dest.tell.return_value = 0
+ fake_exists.return_value = True
+
+ mock_context_manager = mock.MagicMock()
+ fake_open.return_value = mock_context_manager
+ mock_context_manager.__enter__.side_effect = [fake_src, fake_dest]
+ self._iothread._stopped.isSet = mock.Mock(side_effect=[False, True])
+
+ self._iothread._copy(self._FAKE_SRC, self._FAKE_DEST)
+
+ fake_dest.seek.assert_called_once_with(0, os.SEEK_END)
+ fake_dest.write.assert_called_once_with(fake_data)
+ fake_dest.close.assert_called_once_with()
+ fake_rename.assert_called_once_with(
+ self._iothread._dest, self._iothread._dest_archive)
+ fake_remove.assert_called_once_with(
+ self._iothread._dest_archive)
+ self.assertEqual(3, fake_open.call_count)
diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py
new file mode 100644
index 0000000000..8cda2ccd48
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_migrationops.py
@@ -0,0 +1,79 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import migrationops
+from nova.virt.hyperv import vmutils
+
+
+class MigrationOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V MigrationOps class."""
+
+ _FAKE_TIMEOUT = 10
+ _FAKE_RETRY_INTERVAL = 5
+
+ def setUp(self):
+ super(MigrationOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ # utilsfactory will check the host OS version via get_hostutils,
+ # in order to return the proper Utils Class, so it must be mocked.
+ patched_func = mock.patch.object(migrationops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._migrationops = migrationops.MigrationOps()
+ self._migrationops._vmops = mock.MagicMock()
+ self._migrationops._vmutils = mock.MagicMock()
+
+ def test_check_and_attach_config_drive_unknown_path(self):
+ instance = fake_instance.fake_instance_obj(self.context,
+ expected_attrs=['system_metadata'])
+ instance.config_drive = 'True'
+ self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock(
+ return_value=None)
+ self.assertRaises(vmutils.HyperVException,
+ self._migrationops._check_and_attach_config_drive,
+ instance)
+
+ @mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files')
+ @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor')
+ def test_migrate_disk_and_power_off(self, mock_check_flavor,
+ mock_migrate_disk_files):
+ instance = fake_instance.fake_instance_obj(self.context)
+ flavor = mock.MagicMock()
+ network_info = mock.MagicMock()
+
+ disk_files = [mock.MagicMock()]
+ volume_drives = [mock.MagicMock()]
+
+ mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths
+ mock_get_vm_st_path.return_value = (disk_files, volume_drives)
+
+ self._migrationops.migrate_disk_and_power_off(
+ self.context, instance, mock.sentinel.FAKE_DEST, flavor,
+ network_info, None, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
+
+ mock_check_flavor.assert_called_once_with(instance, flavor)
+ self._migrationops._vmops.power_off.assert_called_once_with(
+ instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
+ mock_get_vm_st_path.assert_called_once_with(instance.name)
+ mock_migrate_disk_files.assert_called_once_with(
+ instance.name, disk_files, mock.sentinel.FAKE_DEST)
+ self._migrationops._vmops.destroy.assert_called_once_with(
+ instance, destroy_disks=False)
diff --git a/nova/tests/unit/virt/hyperv/test_networkutils.py b/nova/tests/unit/virt/hyperv/test_networkutils.py
new file mode 100644
index 0000000000..281df29833
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_networkutils.py
@@ -0,0 +1,82 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import networkutils
+from nova.virt.hyperv import vmutils
+
+
+class NetworkUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V NetworkUtils class."""
+
+ _FAKE_PORT = {'Name': mock.sentinel.FAKE_PORT_NAME}
+ _FAKE_RET_VALUE = 0
+
+ _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualSwitch'
+
+ def setUp(self):
+ self._networkutils = networkutils.NetworkUtils()
+ self._networkutils._conn = mock.MagicMock()
+
+ super(NetworkUtilsTestCase, self).setUp()
+
+ def test_get_external_vswitch(self):
+ mock_vswitch = mock.MagicMock()
+ mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
+ getattr(self._networkutils._conn,
+ self._MSVM_VIRTUAL_SWITCH).return_value = [mock_vswitch]
+
+ switch_path = self._networkutils.get_external_vswitch(
+ mock.sentinel.FAKE_VSWITCH_NAME)
+
+ self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
+
+ def test_get_external_vswitch_not_found(self):
+ self._networkutils._conn.Msvm_VirtualEthernetSwitch.return_value = []
+
+ self.assertRaises(vmutils.HyperVException,
+ self._networkutils.get_external_vswitch,
+ mock.sentinel.FAKE_VSWITCH_NAME)
+
+ def test_get_external_vswitch_no_name(self):
+ mock_vswitch = mock.MagicMock()
+ mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
+
+ mock_ext_port = self._networkutils._conn.Msvm_ExternalEthernetPort()[0]
+ self._prepare_external_port(mock_vswitch, mock_ext_port)
+
+ switch_path = self._networkutils.get_external_vswitch(None)
+ self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
+
+ def _prepare_external_port(self, mock_vswitch, mock_ext_port):
+ mock_lep = mock_ext_port.associators()[0]
+ mock_lep.associators.return_value = [mock_vswitch]
+
+ def test_create_vswitch_port(self):
+ svc = self._networkutils._conn.Msvm_VirtualSwitchManagementService()[0]
+ svc.CreateSwitchPort.return_value = (
+ self._FAKE_PORT, self._FAKE_RET_VALUE)
+
+ port = self._networkutils.create_vswitch_port(
+ mock.sentinel.FAKE_VSWITCH_PATH, mock.sentinel.FAKE_PORT_NAME)
+
+ svc.CreateSwitchPort.assert_called_once_with(
+ Name=mock.ANY, FriendlyName=mock.sentinel.FAKE_PORT_NAME,
+ ScopeOfResidence="", VirtualSwitch=mock.sentinel.FAKE_VSWITCH_PATH)
+ self.assertEqual(self._FAKE_PORT, port)
+
+ def test_vswitch_port_needed(self):
+ self.assertTrue(self._networkutils.vswitch_port_needed())
diff --git a/nova/tests/unit/virt/hyperv/test_networkutilsv2.py b/nova/tests/unit/virt/hyperv/test_networkutilsv2.py
new file mode 100644
index 0000000000..1038e88682
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_networkutilsv2.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.hyperv import test_networkutils
+from nova.virt.hyperv import networkutilsv2
+
+
+class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
+ """Unit tests for the Hyper-V NetworkUtilsV2 class."""
+
+ _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
+
+ def setUp(self):
+ super(NetworkUtilsV2TestCase, self).setUp()
+ self._networkutils = networkutilsv2.NetworkUtilsV2()
+ self._networkutils._conn = mock.MagicMock()
+
+ def _prepare_external_port(self, mock_vswitch, mock_ext_port):
+ mock_lep = mock_ext_port.associators()[0]
+ mock_lep1 = mock_lep.associators()[0]
+ mock_esw = mock_lep1.associators()[0]
+ mock_esw.associators.return_value = [mock_vswitch]
+
+ def test_create_vswitch_port(self):
+ self.assertRaises(
+ NotImplementedError,
+ self._networkutils.create_vswitch_port,
+ mock.sentinel.FAKE_VSWITCH_PATH,
+ mock.sentinel.FAKE_PORT_NAME)
+
+ def test_vswitch_port_needed(self):
+ self.assertFalse(self._networkutils.vswitch_port_needed())
diff --git a/nova/tests/unit/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py
new file mode 100644
index 0000000000..0ded84ec6b
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_pathutils.py
@@ -0,0 +1,58 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+
+
+class PathUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V PathUtils class."""
+
+ def setUp(self):
+ self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
+ self.fake_instance_name = 'fake_instance_name'
+ self._pathutils = pathutils.PathUtils()
+ super(PathUtilsTestCase, self).setUp()
+
+ def _mock_lookup_configdrive_path(self, ext):
+ self._pathutils.get_instance_dir = mock.MagicMock(
+ return_value=self.fake_instance_dir)
+
+ def mock_exists(*args, **kwargs):
+ path = args[0]
+ return True if path[(path.rfind('.') + 1):] == ext else False
+ self._pathutils.exists = mock_exists
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ self.fake_instance_name)
+ return configdrive_path
+
+ def test_lookup_configdrive_path(self):
+ for format_ext in constants.DISK_FORMAT_MAP:
+ configdrive_path = self._mock_lookup_configdrive_path(format_ext)
+ fake_path = os.path.join(self.fake_instance_dir,
+ 'configdrive.' + format_ext)
+ self.assertEqual(configdrive_path, fake_path)
+
+ def test_lookup_configdrive_path_non_exist(self):
+ self._pathutils.get_instance_dir = mock.MagicMock(
+ return_value=self.fake_instance_dir)
+ self._pathutils.exists = mock.MagicMock(return_value=False)
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ self.fake_instance_name)
+ self.assertIsNone(configdrive_path)
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
new file mode 100644
index 0000000000..98d4484b61
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
@@ -0,0 +1,28 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt.hyperv import rdpconsoleutils
+
+
+class RDPConsoleUtilsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils()
+ super(RDPConsoleUtilsTestCase, self).setUp()
+
+ def test_get_rdp_console_port(self):
+ listener_port = self._rdpconsoleutils.get_rdp_console_port()
+
+ self.assertEqual(self._rdpconsoleutils._DEFAULT_HYPERV_RDP_PORT,
+ listener_port)
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py
new file mode 100644
index 0000000000..bcdfaf92f0
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py
@@ -0,0 +1,37 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import rdpconsoleutilsv2
+
+
+class RDPConsoleUtilsV2TestCase(test.NoDBTestCase):
+ _FAKE_RDP_PORT = 1000
+
+ def setUp(self):
+ self._rdpconsoleutils = rdpconsoleutilsv2.RDPConsoleUtilsV2()
+ self._rdpconsoleutils._conn = mock.MagicMock()
+
+ super(RDPConsoleUtilsV2TestCase, self).setUp()
+
+ def test_get_rdp_console_port(self):
+ conn = self._rdpconsoleutils._conn
+ mock_rdp_setting_data = conn.Msvm_TerminalServiceSettingData()[0]
+ mock_rdp_setting_data.ListenerPort = self._FAKE_RDP_PORT
+
+ listener_port = self._rdpconsoleutils.get_rdp_console_port()
+
+ self.assertEqual(self._FAKE_RDP_PORT, listener_port)
diff --git a/nova/tests/unit/virt/hyperv/test_utilsfactory.py b/nova/tests/unit/virt/hyperv/test_utilsfactory.py
new file mode 100644
index 0000000000..77b8a92a8e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_utilsfactory.py
@@ -0,0 +1,57 @@
+# Copyright 2014 Cloudbase Solutions SRL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for the Hyper-V utils factory.
+"""
+
+import mock
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import vmutilsv2
+
+CONF = cfg.CONF
+
+
+class TestHyperVUtilsFactory(test.NoDBTestCase):
+ def test_get_vmutils_force_v1_and_min_version(self):
+ self._test_returned_class(None, True, True)
+
+ def test_get_vmutils_v2(self):
+ self._test_returned_class(vmutilsv2.VMUtilsV2, False, True)
+
+ def test_get_vmutils_v2_r2(self):
+ self._test_returned_class(vmutils.VMUtils, False, False)
+
+ def test_get_vmutils_force_v1_and_not_min_version(self):
+ self._test_returned_class(vmutils.VMUtils, True, False)
+
+ def _test_returned_class(self, expected_class, force_v1, os_supports_v2):
+ CONF.set_override('force_hyperv_utils_v1', force_v1, 'hyperv')
+ with mock.patch.object(
+ hostutils.HostUtils,
+ 'check_min_windows_version') as mock_check_min_windows_version:
+ mock_check_min_windows_version.return_value = os_supports_v2
+
+ if os_supports_v2 and force_v1:
+ self.assertRaises(vmutils.HyperVException,
+ utilsfactory.get_vmutils)
+ else:
+ actual_class = type(utilsfactory.get_vmutils())
+ self.assertEqual(actual_class, expected_class)
diff --git a/nova/tests/unit/virt/hyperv/test_vhdutils.py b/nova/tests/unit/virt/hyperv/test_vhdutils.py
new file mode 100644
index 0000000000..e41353329a
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vhdutils.py
@@ -0,0 +1,161 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vmutils
+
+
+class VHDUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VHDUtils class."""
+
+ _FAKE_VHD_PATH = "C:\\fake_path.vhdx"
+ _FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx"
+ _FAKE_FORMAT = 3
+ _FAKE_MAK_INTERNAL_SIZE = 1000
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+
+ def setUp(self):
+ self._vhdutils = vhdutils.VHDUtils()
+ self._vhdutils._conn = mock.MagicMock()
+ self._vhdutils._vmutils = mock.MagicMock()
+ super(VHDUtilsTestCase, self).setUp()
+
+ def test_create_dynamic_vhd(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'Format': self._FAKE_FORMAT})
+
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateDynamicVirtualHardDisk.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE,
+ constants.DISK_FORMAT_VHD)
+
+ mock_img_svc.CreateDynamicVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE)
+
+ def test_create_differencing_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_PATH)
+
+ mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ ParentPath=self._FAKE_PARENT_PATH)
+
+ def test_create_differencing_vhd_with_new_size(self):
+ fake_new_size = 1024
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.create_differencing_vhd,
+ self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_PATH,
+ fake_new_size)
+
+ def test_get_internal_vhd_size_by_file_size_fixed(self):
+ vhdutil = vhdutils.VHDUtils()
+ root_vhd_size = 1 * 1024 ** 3
+ vhdutil.get_vhd_info = mock.MagicMock()
+ vhdutil.get_vhd_info.return_value = {'Type': constants.VHD_TYPE_FIXED}
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 1 * 1024 ** 3 - 512
+ self.assertEqual(expected_vhd_size, real_size)
+
+ def test_get_internal_vhd_size_by_file_size_dynamic(self):
+ vhdutil = vhdutils.VHDUtils()
+ root_vhd_size = 20 * 1024 ** 3
+ vhdutil.get_vhd_info = mock.MagicMock()
+ vhdutil.get_vhd_info.return_value = {'Type':
+ constants.VHD_TYPE_DYNAMIC}
+ vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
+ vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 20 * 1024 ** 3 - 43008
+ self.assertEqual(expected_vhd_size, real_size)
+
+ def test_get_internal_vhd_size_by_file_size_differencing(self):
+ # For differencing images, the internal size of the parent vhd
+ # is returned
+ vhdutil = vhdutils.VHDUtils()
+ root_vhd_size = 20 * 1024 ** 3
+ vhdutil.get_vhd_info = mock.MagicMock()
+ vhdutil.get_vhd_parent_path = mock.MagicMock()
+ vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH
+ vhdutil.get_vhd_info.side_effect = [
+ {'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}]
+
+ vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
+ vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 20 * 1024 ** 3 - 43008
+ self.assertEqual(expected_vhd_size, real_size)
+
+ def test_get_vhd_format_vhdx(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
+ create=True):
+
+ format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
+
+ self.assertEqual(constants.DISK_FORMAT_VHDX, format)
+
+ def test_get_vhd_format_vhd(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=vhdutils.VHD_SIGNATURE),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 1024
+
+ format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
+
+ self.assertEqual(constants.DISK_FORMAT_VHD, format)
+
+ def test_get_vhd_format_invalid_format(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data='invalid'),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 1024
+
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.get_vhd_format,
+ self._FAKE_VHD_PATH)
+
+ def test_get_vhd_format_zero_length_file(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=''),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 0
+
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.get_vhd_format,
+ self._FAKE_VHD_PATH)
+
+ f.seek.assert_called_once_with(0, 2)
diff --git a/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py b/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py
new file mode 100644
index 0000000000..a813d3bbd6
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py
@@ -0,0 +1,249 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import units
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vhdutilsv2
+
+
+class VHDUtilsV2TestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VHDUtilsV2 class."""
+
+ _FAKE_VHD_PATH = "C:\\fake_path.vhdx"
+ _FAKE_PARENT_VHD_PATH = "C:\\fake_parent_path.vhdx"
+ _FAKE_FORMAT = 3
+ _FAKE_MAK_INTERNAL_SIZE = units.Gi
+ _FAKE_TYPE = 3
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+ _FAKE_VHD_FORMAT = 'vhdx'
+ _FAKE_BLOCK_SIZE = 33554432
+ _FAKE_LOG_SIZE = 1048576
+ _FAKE_LOGICAL_SECTOR_SIZE = 4096
+ _FAKE_METADATA_SIZE = 1048576
+ _FAKE_VHD_INFO = {'ParentPath': _FAKE_PARENT_VHD_PATH,
+ 'Format': _FAKE_FORMAT,
+ 'BlockSize': _FAKE_BLOCK_SIZE,
+ 'LogicalSectorSize': _FAKE_LOGICAL_SECTOR_SIZE,
+ 'Type': _FAKE_TYPE}
+
+ def setUp(self):
+ self._vhdutils = vhdutilsv2.VHDUtilsV2()
+ self._vhdutils._conn = mock.MagicMock()
+ self._vhdutils._vmutils = mock.MagicMock()
+ self._vhdutils.get_vhd_format = mock.MagicMock(
+ return_value=self._FAKE_VHD_FORMAT)
+
+ self._fake_file_handle = mock.MagicMock()
+ self._fake_vhd_info_xml = (
+ '<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">'
+ '<PROPERTY NAME="BlockSize" TYPE="uint32">'
+ '<VALUE>33554432</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Caption" TYPE="string">'
+ '<VALUE>Virtual Hard Disk Setting Data</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Description" TYPE="string">'
+ '<VALUE>Setting Data for a Virtual Hard Disk.</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="ElementName" TYPE="string">'
+ '<VALUE>fake_path.vhdx</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Format" TYPE="uint16">'
+ '<VALUE>%(format)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="InstanceID" TYPE="string">'
+ '<VALUE>52794B89-AC06-4349-AC57-486CAAD52F69</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="LogicalSectorSize" TYPE="uint32">'
+ '<VALUE>512</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="MaxInternalSize" TYPE="uint64">'
+ '<VALUE>%(max_internal_size)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="ParentPath" TYPE="string">'
+ '<VALUE>%(parent_path)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Path" TYPE="string">'
+ '<VALUE>%(path)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="PhysicalSectorSize" TYPE="uint32">'
+ '<VALUE>4096</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Type" TYPE="uint16">'
+ '<VALUE>%(type)s</VALUE>'
+ '</PROPERTY>'
+ '</INSTANCE>' %
+ {'path': self._FAKE_VHD_PATH,
+ 'parent_path': self._FAKE_PARENT_VHD_PATH,
+ 'format': self._FAKE_FORMAT,
+ 'max_internal_size': self._FAKE_MAK_INTERNAL_SIZE,
+ 'type': self._FAKE_TYPE})
+
+ super(VHDUtilsV2TestCase, self).setUp()
+
+ def test_get_vhd_info(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.GetVirtualHardDiskSettingData.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL, self._fake_vhd_info_xml)
+
+ vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
+
+ self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path'])
+ self.assertEqual(self._FAKE_PARENT_VHD_PATH, vhd_info['ParentPath'])
+ self.assertEqual(self._FAKE_FORMAT, vhd_info['Format'])
+ self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE,
+ vhd_info['MaxInternalSize'])
+ self.assertEqual(self._FAKE_TYPE, vhd_info['Type'])
+
+ def test_create_dynamic_vhd(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'Format': self._FAKE_FORMAT})
+
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
+ self._FAKE_RET_VAL)
+
+ self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE,
+ constants.DISK_FORMAT_VHDX)
+
+ self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)
+
+ def test_create_differencing_vhd(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'ParentPath': self._FAKE_PARENT_VHD_PATH,
+ 'Format': self._FAKE_FORMAT})
+
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
+ self._FAKE_RET_VAL)
+
+ self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_VHD_PATH)
+
+ self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)
+
+ def test_reconnect_parent_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+
+ self._vhdutils._get_vhd_info_xml = mock.MagicMock(
+ return_value=self._fake_vhd_info_xml)
+
+ mock_img_svc.SetVirtualHardDiskSettingData.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_VHD_PATH)
+
+ mock_img_svc.SetVirtualHardDiskSettingData.assert_called_once_with(
+ VirtualDiskSettingData=self._fake_vhd_info_xml)
+
+ def test_resize_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.ResizeVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
+ self._FAKE_RET_VAL)
+ self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
+ return_value=self._FAKE_MAK_INTERNAL_SIZE)
+
+ self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE)
+
+ mock_img_svc.ResizeVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE)
+
+ self.mock_get = self._vhdutils.get_internal_vhd_size_by_file_size
+ self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE)
+
+ def _test_get_vhdx_internal_size(self, vhd_type):
+ self._vhdutils.get_vhd_info = mock.MagicMock()
+ self._vhdutils.get_vhd_parent_path = mock.Mock(
+ return_value=self._FAKE_PARENT_VHD_PATH)
+
+ if vhd_type == 4:
+ self._vhdutils.get_vhd_info.side_effect = [
+ {'Type': vhd_type}, self._FAKE_VHD_INFO]
+ else:
+ self._vhdutils.get_vhd_info.return_value = self._FAKE_VHD_INFO
+ self._vhdutils._get_vhdx_log_size = mock.MagicMock(
+ return_value=self._FAKE_LOG_SIZE)
+ self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
+ return_value=(self._FAKE_METADATA_SIZE, 1024))
+ self._vhdutils._get_vhdx_block_size = mock.MagicMock(
+ return_value=self._FAKE_BLOCK_SIZE)
+
+ file_mock = mock.MagicMock()
+ with mock.patch('__builtin__.open', file_mock):
+ internal_size = (
+ self._vhdutils.get_internal_vhd_size_by_file_size(
+ self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE))
+
+ self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE,
+ internal_size)
+
+ def test_get_vhdx_internal_size_dynamic(self):
+ self._test_get_vhdx_internal_size(3)
+
+ def test_get_vhdx_internal_size_differencing(self):
+ self._test_get_vhdx_internal_size(4)
+
+ def test_get_vhdx_current_header(self):
+ VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
+ fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00',
+ '\x02\x00\x00\x00\x00\x00\x00\x00']
+ self._fake_file_handle.read = mock.MagicMock(
+ side_effect=fake_sequence_numbers)
+
+ offset = self._vhdutils._get_vhdx_current_header_offset(
+ self._fake_file_handle)
+ self.assertEqual(offset, VHDX_HEADER_OFFSETS[1])
+
+ def test_get_vhdx_metadata_size(self):
+ fake_metadata_offset = '\x01\x00\x00\x00\x00\x00\x00\x00'
+ fake_metadata_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ side_effect=[fake_metadata_offset, fake_metadata_size])
+
+ metadata_size, metadata_offset = (
+ self._vhdutils._get_vhdx_metadata_size_and_offset(
+ self._fake_file_handle))
+ self.assertEqual(metadata_size, 1)
+ self.assertEqual(metadata_offset, 1)
+
+ def test_get_block_size(self):
+ self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
+ return_value=(self._FAKE_METADATA_SIZE, 1024))
+ fake_block_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ return_value=fake_block_size)
+
+ block_size = self._vhdutils._get_vhdx_block_size(
+ self._fake_file_handle)
+ self.assertEqual(block_size, 1)
+
+ def test_get_log_size(self):
+ fake_current_header_offset = 64 * 1024
+ self._vhdutils._get_vhdx_current_header_offset = mock.MagicMock(
+ return_value=fake_current_header_offset)
+ fake_log_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ return_value=fake_log_size)
+
+ log_size = self._vhdutils._get_vhdx_log_size(self._fake_file_handle)
+ self.assertEqual(log_size, 1)
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
new file mode 100644
index 0000000000..5ec107747e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -0,0 +1,230 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import timeout as etimeout
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import vmutils
+
+
+class VMOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VMOps class."""
+
+ _FAKE_TIMEOUT = 2
+
+ def __init__(self, test_case_name):
+ super(VMOpsTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(VMOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ # utilsfactory will check the host OS version via get_hostutils,
+ # in order to return the proper Utils Class, so it must be mocked.
+ patched_func = mock.patch.object(vmops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._vmops = vmops.VMOps()
+
+ def test_attach_config_drive(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._vmops.attach_config_drive,
+ instance, 'C:/fake_instance_dir/configdrive.xxx')
+
+ def test_reboot_hard(self):
+ self._test_reboot(vmops.REBOOT_TYPE_HARD,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = True
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_failed(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = False
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
+ mock_soft_shutdown.return_value = True
+ mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
+ instance, {}, vmops.REBOOT_TYPE_SOFT)
+
+ mock_soft_shutdown.assert_called_once_with(instance)
+ mock_power_on.assert_called_once_with(instance)
+
+ def _test_reboot(self, reboot_type, vm_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.reboot(instance, {}, reboot_type)
+ mock_set_state.assert_called_once_with(instance, vm_state)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown(self, mock_wait_for_power_off, mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = True
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ mock_wait_for_power_off.assert_called_once_with(
+ instance.name, self._FAKE_TIMEOUT)
+
+ self.assertTrue(result)
+
+ @mock.patch("time.sleep")
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ def test_soft_shutdown_failed(self, mock_shutdown_vm, mock_sleep):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ mock_shutdown_vm.side_effect = vmutils.HyperVException(
+ "Expected failure.")
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ self.assertFalse(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown_wait(self, mock_wait_for_power_off,
+ mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.side_effect = [False, True]
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
+
+ calls = [mock.call(instance.name, 1),
+ mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
+ mock_shutdown_vm.assert_called_with(instance.name)
+ mock_wait_for_power_off.assert_has_calls(calls)
+
+ self.assertTrue(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off,
+ mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = False
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
+
+ calls = [mock.call(instance.name, 1.5),
+ mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
+ mock_shutdown_vm.assert_called_with(instance.name)
+ mock_wait_for_power_off.assert_has_calls(calls)
+
+ self.assertFalse(result)
+
+ def _test_power_off(self, timeout):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.power_off(instance, timeout)
+
+ mock_set_state.assert_called_once_with(
+ instance, constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_hard(self):
+ self._test_power_off(timeout=0)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_power_off_exception(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = False
+ self._test_power_off(timeout=1)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_soft_shutdown.return_value = True
+
+ self._vmops.power_off(instance, 1, 0)
+
+ mock_soft_shutdown.assert_called_once_with(
+ instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(mock_set_state.called)
+
+ def test_get_vm_state(self):
+ summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
+
+ with mock.patch.object(self._vmops._vmutils,
+ 'get_vm_summary_info') as mock_get_summary_info:
+ mock_get_summary_info.return_value = summary_info
+
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
+
+ @mock.patch.object(vmops.VMOps, '_get_vm_state')
+ def test_wait_for_power_off_true(self, mock_get_state):
+ mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
+ self.assertTrue(result)
+
+ @mock.patch.object(vmops.etimeout, "with_timeout")
+ def test_wait_for_power_off_false(self, mock_with_timeout):
+ mock_with_timeout.side_effect = etimeout.Timeout()
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(result)
+
+ @mock.patch("__builtin__.open")
+ @mock.patch("os.path.exists")
+ @mock.patch.object(pathutils.PathUtils, 'get_vm_console_log_paths')
+ def test_get_console_output_exception(self,
+ fake_get_vm_log_path,
+ fake_path_exists,
+ fake_open):
+ fake_vm = mock.MagicMock()
+
+ fake_open.side_effect = vmutils.HyperVException
+ fake_path_exists.return_value = True
+ fake_get_vm_log_path.return_value = (
+ mock.sentinel.fake_console_log_path,
+ mock.sentinel.fake_console_log_archived)
+
+ with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
+ self.assertRaises(vmutils.HyperVException,
+ self._vmops.get_console_output,
+ fake_vm)
+
+ def test_list_instance_uuids(self):
+ fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
+ with mock.patch.object(self._vmops._vmutils,
+ 'list_instance_notes') as mock_list_notes:
+ mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+
+ response = self._vmops.list_instance_uuids()
+ mock_list_notes.assert_called_once_with()
+
+ self.assertEqual(response, [fake_uuid])
diff --git a/nova/tests/unit/virt/hyperv/test_vmutils.py b/nova/tests/unit/virt/hyperv/test_vmutils.py
new file mode 100644
index 0000000000..7c54f273ab
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmutils.py
@@ -0,0 +1,668 @@
+# Copyright 2014 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmutils
+
+
+class VMUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VMUtils class."""
+
+ _FAKE_VM_NAME = 'fake_vm'
+ _FAKE_MEMORY_MB = 2
+ _FAKE_VCPUS_NUM = 4
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+ _FAKE_RET_VAL_BAD = -1
+ _FAKE_CTRL_PATH = 'fake_ctrl_path'
+ _FAKE_CTRL_ADDR = 0
+ _FAKE_DRIVE_ADDR = 0
+ _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
+ _FAKE_VM_PATH = "fake_vm_path"
+ _FAKE_VHD_PATH = "fake_vhd_path"
+ _FAKE_DVD_PATH = "fake_dvd_path"
+ _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
+ _FAKE_VM_UUID = "04e79212-39bc-4065-933c-50f6d48a57f6"
+ _FAKE_INSTANCE = {"name": _FAKE_VM_NAME,
+ "uuid": _FAKE_VM_UUID}
+ _FAKE_SNAPSHOT_PATH = "fake_snapshot_path"
+ _FAKE_RES_DATA = "fake_res_data"
+ _FAKE_HOST_RESOURCE = "fake_host_resource"
+ _FAKE_CLASS = "FakeClass"
+ _FAKE_RES_PATH = "fake_res_path"
+ _FAKE_RES_NAME = 'fake_res_name'
+ _FAKE_ADDRESS = "fake_address"
+ _FAKE_JOB_STATUS_DONE = 7
+ _FAKE_JOB_STATUS_BAD = -1
+ _FAKE_JOB_DESCRIPTION = "fake_job_description"
+ _FAKE_ERROR = "fake_error"
+ _FAKE_ELAPSED_TIME = 0
+ _CONCRETE_JOB = "Msvm_ConcreteJob"
+ _FAKE_DYNAMIC_MEMORY_RATIO = 1.0
+
+ _FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4,
+ 'EnabledState': 2,
+ 'MemoryUsage': 2,
+ 'UpTime': 1}
+
+ _DEFINE_SYSTEM = 'DefineVirtualSystem'
+ _DESTROY_SYSTEM = 'DestroyVirtualSystem'
+ _DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot'
+ _ADD_RESOURCE = 'AddVirtualSystemResources'
+ _REMOVE_RESOURCE = 'RemoveVirtualSystemResources'
+ _SETTING_TYPE = 'SettingType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 3
+
+ def setUp(self):
+ self._vmutils = vmutils.VMUtils()
+ self._vmutils._conn = mock.MagicMock()
+
+ super(VMUtilsTestCase, self).setUp()
+
+ def test_enable_vm_metrics_collection(self):
+ self.assertRaises(NotImplementedError,
+ self._vmutils.enable_vm_metrics_collection,
+ self._FAKE_VM_NAME)
+
+ def test_get_vm_summary_info(self):
+ self._lookup_vm()
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+
+ mock_summary = mock.MagicMock()
+ mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
+ [mock_summary])
+
+ for (key, val) in self._FAKE_SUMMARY_INFO.items():
+ setattr(mock_summary, key, val)
+
+ summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME)
+ self.assertEqual(self._FAKE_SUMMARY_INFO, summary)
+
+ def _lookup_vm(self):
+ mock_vm = mock.MagicMock()
+ self._vmutils._lookup_vm_check = mock.MagicMock(
+ return_value=mock_vm)
+ mock_vm.path_.return_value = self._FAKE_VM_PATH
+ return mock_vm
+
+ def test_lookup_vm_ok(self):
+ mock_vm = mock.MagicMock()
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
+ vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME)
+ self.assertEqual(mock_vm, vm)
+
+ def test_lookup_vm_multiple(self):
+ mockvm = mock.MagicMock()
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm]
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._lookup_vm_check,
+ self._FAKE_VM_NAME)
+
+ def test_lookup_vm_none(self):
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = []
+ self.assertRaises(exception.NotFound,
+ self._vmutils._lookup_vm_check,
+ self._FAKE_VM_NAME)
+
+ def test_set_vm_memory_static(self):
+ self._test_set_vm_memory_dynamic(1.0)
+
+ def test_set_vm_memory_dynamic(self):
+ self._test_set_vm_memory_dynamic(2.0)
+
+ def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
+ mock_vm = self._lookup_vm()
+
+ mock_s = self._vmutils._conn.Msvm_VirtualSystemSettingData()[0]
+ mock_s.SystemType = 3
+
+ mock_vmsetting = mock.MagicMock()
+ mock_vmsetting.associators.return_value = [mock_s]
+
+ self._vmutils._modify_virt_resource = mock.MagicMock()
+
+ self._vmutils._set_vm_memory(mock_vm, mock_vmsetting,
+ self._FAKE_MEMORY_MB,
+ dynamic_memory_ratio)
+
+ self._vmutils._modify_virt_resource.assert_called_with(
+ mock_s, self._FAKE_VM_PATH)
+
+ if dynamic_memory_ratio > 1:
+ self.assertTrue(mock_s.DynamicMemoryEnabled)
+ else:
+ self.assertFalse(mock_s.DynamicMemoryEnabled)
+
+ def test_soft_shutdown_vm(self):
+ mock_vm = self._lookup_vm()
+ mock_shutdown = mock.MagicMock()
+ mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, )
+ mock_vm.associators.return_value = [mock_shutdown]
+
+ with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
+ self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
+
+ mock_shutdown.InitiateShutdown.assert_called_once_with(
+ Force=False, Reason=mock.ANY)
+ mock_check.assert_called_once_with(self._FAKE_RET_VAL, None)
+
+ def test_soft_shutdown_vm_no_component(self):
+ mock_vm = self._lookup_vm()
+ mock_vm.associators.return_value = []
+
+ with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
+ self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
+ self.assertFalse(mock_check.called)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_get_vm_storage_paths(self, mock_get_vm_disks):
+ self._lookup_vm()
+ mock_rasds = self._create_mock_disks()
+ mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]])
+
+ storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
+ (disk_files, volume_drives) = storage
+
+ self.assertEqual([self._FAKE_VHD_PATH], disk_files)
+ self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
+
+ def test_get_vm_disks(self):
+ mock_vm = self._lookup_vm()
+ mock_vmsettings = [mock.MagicMock()]
+ mock_vm.associators.return_value = mock_vmsettings
+
+ mock_rasds = self._create_mock_disks()
+ mock_vmsettings[0].associators.return_value = mock_rasds
+
+ (disks, volumes) = self._vmutils._get_vm_disks(mock_vm)
+
+ mock_vm.associators.assert_called_with(
+ wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
+ mock_vmsettings[0].associators.assert_called_with(
+ wmi_result_class=self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS)
+ self.assertEqual([mock_rasds[0]], disks)
+ self.assertEqual([mock_rasds[1]], volumes)
+
+ def _create_mock_disks(self):
+ mock_rasd1 = mock.MagicMock()
+ mock_rasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
+ mock_rasd1.Connection = [self._FAKE_VHD_PATH]
+ mock_rasd1.Parent = self._FAKE_CTRL_PATH
+ mock_rasd1.Address = self._FAKE_ADDRESS
+ mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
+
+ mock_rasd2 = mock.MagicMock()
+ mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
+ mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
+
+ return [mock_rasd1, mock_rasd2]
+
+ @mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus')
+ @mock.patch.object(vmutils.VMUtils, '_set_vm_memory')
+ @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj')
+ def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._DEFINE_SYSTEM).return_value = (
+ None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ mock_vm = mock_get_wmi_obj.return_value
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
+
+ mock_s = mock.MagicMock()
+ setattr(mock_s,
+ self._SETTING_TYPE,
+ self._VIRTUAL_SYSTEM_TYPE_REALIZED)
+ mock_vm.associators.return_value = [mock_s]
+
+ self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
+ self._FAKE_VCPUS_NUM, False,
+ self._FAKE_DYNAMIC_MEMORY_RATIO)
+
+ self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called)
+ mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB,
+ self._FAKE_DYNAMIC_MEMORY_RATIO)
+
+ mock_set_vcpus.assert_called_with(mock_vm, mock_s,
+ self._FAKE_VCPUS_NUM,
+ False)
+
+ def test_get_vm_scsi_controller(self):
+ self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE)
+ path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
+ self.assertEqual(self._FAKE_RES_PATH, path)
+
+ def test_get_vm_ide_controller(self):
+ self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
+ path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
+ self._FAKE_ADDRESS)
+ self.assertEqual(self._FAKE_RES_PATH, path)
+
+ def _prepare_get_vm_controller(self, resource_sub_type):
+ mock_vm = self._lookup_vm()
+ mock_vm_settings = mock.MagicMock()
+ mock_rasds = mock.MagicMock()
+ mock_rasds.path_.return_value = self._FAKE_RES_PATH
+ mock_rasds.ResourceSubType = resource_sub_type
+ mock_rasds.Address = self._FAKE_ADDRESS
+ mock_vm_settings.associators.return_value = [mock_rasds]
+ mock_vm.associators.return_value = [mock_vm_settings]
+
+ def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings):
+ mock_rasds = mock_vm_settings.associators.return_value[0]
+ mock_rasds.path_.return_value = mock_path
+ mock_rasds.ResourceSubType = mock_subtype
+ return mock_rasds
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ @mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller')
+ def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ mock_rsd = mock_get_new_rsd.return_value
+
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
+ self._FAKE_CTRL_PATH,
+ self._FAKE_CTRL_ADDR,
+ self._FAKE_DRIVE_ADDR)
+
+ mock_add_virt_res.assert_called_with(mock_rsd,
+ mock_vm.path_.return_value)
+
+ mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR)
+ self.assertTrue(mock_get_new_rsd.called)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ def test_create_scsi_controller(self, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
+
+ mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
+ mock_vm.path_.return_value)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ def test_attach_volume_to_controller(self, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.attach_volume_to_controller(
+ self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR,
+ self._FAKE_MOUNTED_DISK_PATH)
+
+ mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
+ mock_vm.path_.return_value)
+
+ @mock.patch.object(vmutils.VMUtils, '_modify_virt_resource')
+ @mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res):
+ self._lookup_vm()
+ mock_nic = mock_get_nic_conn.return_value
+ self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
+
+ mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_setting_data')
+ def test_create_nic(self, mock_get_new_virt_res):
+ self._lookup_vm()
+ mock_nic = mock_get_new_virt_res.return_value
+
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.create_nic(
+ self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS)
+
+ mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
+
+ def test_set_vm_state(self):
+ mock_vm = self._lookup_vm()
+ mock_vm.RequestStateChange.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.set_vm_state(self._FAKE_VM_NAME,
+ constants.HYPERV_VM_STATE_ENABLED)
+ mock_vm.RequestStateChange.assert_called_with(
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_destroy_vm(self):
+ self._lookup_vm()
+
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._DESTROY_SYSTEM).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.destroy_vm(self._FAKE_VM_NAME)
+
+ getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with(
+ self._FAKE_VM_PATH)
+
+ @mock.patch.object(vmutils.VMUtils, '_wait_for_job')
+ def test_check_ret_val_ok(self, mock_wait_for_job):
+ self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
+ self._FAKE_JOB_PATH)
+ mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH)
+
+ def test_check_ret_val_exception(self):
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils.check_ret_val,
+ self._FAKE_RET_VAL_BAD,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_done(self):
+ mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED)
+ job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH)
+ self.assertEqual(mockjob, job)
+
+ def test_wait_for_job_exception_concrete_job(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.path.return_value.Class = self._CONCRETE_JOB
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_exception_with_error(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_exception_no_error(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.GetError.return_value = (None, None)
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
+ mock_job = mock.MagicMock()
+ mock_job.JobState = state
+ mock_job.Description = self._FAKE_JOB_DESCRIPTION
+ mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
+
+ self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job)
+ return mock_job
+
+ def test_add_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._ADD_RESOURCE).return_value = (
+ self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._add_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+ self._assert_add_resources(mock_svc)
+
+ def test_modify_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ mock_svc.ModifyVirtualSystemResources.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+
+ mock_svc.ModifyVirtualSystemResources.assert_called_with(
+ ResourceSettingData=[self._FAKE_RES_DATA],
+ ComputerSystem=self._FAKE_VM_PATH)
+
+ def test_remove_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._REMOVE_RESOURCE).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
+
+ self._vmutils._remove_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+ self._assert_remove_resources(mock_svc)
+
+ def test_set_disk_host_resource(self):
+ self._lookup_vm()
+ mock_rasds = self._create_mock_disks()
+
+ self._vmutils._get_vm_disks = mock.MagicMock(
+ return_value=([mock_rasds[0]], [mock_rasds[1]]))
+ self._vmutils._modify_virt_resource = mock.MagicMock()
+ self._vmutils._get_disk_resource_address = mock.MagicMock(
+ return_value=self._FAKE_ADDRESS)
+
+ self._vmutils.set_disk_host_resource(
+ self._FAKE_VM_NAME,
+ self._FAKE_CTRL_PATH,
+ self._FAKE_ADDRESS,
+ mock.sentinel.fake_new_mounted_disk_path)
+ self._vmutils._get_disk_resource_address.assert_called_with(
+ mock_rasds[0])
+ self._vmutils._modify_virt_resource.assert_called_with(
+ mock_rasds[0], self._FAKE_VM_PATH)
+ self.assertEqual(
+ mock.sentinel.fake_new_mounted_disk_path,
+ mock_rasds[0].HostResource[0])
+
+ @mock.patch.object(vmutils, 'wmi', create=True)
+ @mock.patch.object(vmutils.VMUtils, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
+ self._lookup_vm()
+
+ mock_svc = self._get_snapshot_service()
+ mock_svc.CreateVirtualSystemSnapshot.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock())
+
+ self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
+
+ mock_svc.CreateVirtualSystemSnapshot.assert_called_with(
+ self._FAKE_VM_PATH)
+
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
+
+ def test_remove_vm_snapshot(self):
+ mock_svc = self._get_snapshot_service()
+ getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
+ getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with(
+ self._FAKE_SNAPSHOT_PATH)
+
+ def test_detach_vm_disk(self):
+ self._lookup_vm()
+ mock_disk = self._prepare_mock_disk()
+
+ with mock.patch.object(self._vmutils,
+ '_remove_virt_resource') as mock_rm_virt_res:
+ self._vmutils.detach_vm_disk(self._FAKE_VM_NAME,
+ self._FAKE_HOST_RESOURCE)
+
+ mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH)
+
+ def test_get_mounted_disk_resource_from_path(self):
+ mock_disk_1 = mock.MagicMock()
+ mock_disk_2 = mock.MagicMock()
+ mock_disk_2.HostResource = [self._FAKE_MOUNTED_DISK_PATH]
+ self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
+
+ physical_disk = self._vmutils._get_mounted_disk_resource_from_path(
+ self._FAKE_MOUNTED_DISK_PATH)
+
+ self.assertEqual(mock_disk_2, physical_disk)
+
+ def test_get_controller_volume_paths(self):
+ self._prepare_mock_disk()
+ mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE}
+ disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH)
+ self.assertEqual(mock_disks, disks)
+
+ def _prepare_mock_disk(self):
+ mock_disk = mock.MagicMock()
+ mock_disk.HostResource = [self._FAKE_HOST_RESOURCE]
+ mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH
+ mock_disk.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ self._vmutils._conn.query.return_value = [mock_disk]
+
+ return mock_disk
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ [self._FAKE_RES_DATA], self._FAKE_VM_PATH)
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH], self._FAKE_VM_PATH)
+
+ def test_get_active_instances(self):
+ fake_vm = mock.MagicMock()
+
+ type(fake_vm).ElementName = mock.PropertyMock(
+ side_effect=['active_vm', 'inactive_vm'])
+ type(fake_vm).EnabledState = mock.PropertyMock(
+ side_effect=[constants.HYPERV_VM_STATE_ENABLED,
+ constants.HYPERV_VM_STATE_DISABLED])
+ self._vmutils.list_instances = mock.MagicMock(
+ return_value=[mock.sentinel.fake_vm_name] * 2)
+ self._vmutils._lookup_vm = mock.MagicMock(side_effect=[fake_vm] * 2)
+ active_instances = self._vmutils.get_active_instances()
+
+ self.assertEqual(['active_vm'], active_instances)
+
+ def _test_get_vm_serial_port_connection(self, new_connection=None):
+ old_serial_connection = 'old_serial_connection'
+
+ mock_vm = self._lookup_vm()
+ mock_vmsettings = [mock.MagicMock()]
+ mock_vm.associators.return_value = mock_vmsettings
+
+ fake_serial_port = mock.MagicMock()
+
+ fake_serial_port.ResourceSubType = (
+ self._vmutils._SERIAL_PORT_RES_SUB_TYPE)
+ fake_serial_port.Connection = [old_serial_connection]
+ mock_rasds = [fake_serial_port]
+ mock_vmsettings[0].associators.return_value = mock_rasds
+ self._vmutils._modify_virt_resource = mock.MagicMock()
+ fake_modify = self._vmutils._modify_virt_resource
+
+ ret_val = self._vmutils.get_vm_serial_port_connection(
+ self._FAKE_VM_NAME, update_connection=new_connection)
+
+ if new_connection:
+ self.assertEqual(new_connection, ret_val)
+ fake_modify.assert_called_once_with(fake_serial_port,
+ mock_vm.path_())
+ else:
+ self.assertEqual(old_serial_connection, ret_val)
+
+ def test_set_vm_serial_port_connection(self):
+ self._test_get_vm_serial_port_connection('new_serial_connection')
+
+ def test_get_vm_serial_port_connection(self):
+ self._test_get_vm_serial_port_connection()
+
+ def test_list_instance_notes(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name',
+ 'Notes': '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instance_notes()
+
+ self.assertEqual([(attrs['ElementName'], [attrs['Notes']])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName', 'Notes'],
+ SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
+ def test_modify_virtual_system(self, mock_check_ret_val):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vmsetting = mock.MagicMock()
+ fake_path = 'fake path'
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+
+ mock_vs_man_svc.ModifyVirtualSystem.return_value = (0, fake_job_path,
+ fake_ret_val)
+
+ self._vmutils._modify_virtual_system(vs_man_svc=mock_vs_man_svc,
+ vm_path=fake_path,
+ vmsetting=mock_vmsetting)
+
+ mock_vs_man_svc.ModifyVirtualSystem.assert_called_once_with(
+ ComputerSystem=fake_path,
+ SystemSettingData=mock_vmsetting.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_wmi_obj')
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._modify_virtual_system')
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_setting_data')
+ def test_create_vm_obj(self, mock_get_vm_setting_data,
+ mock_modify_virtual_system,
+ mock_get_wmi_obj, mock_check_ret_val):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vs_gs_data = mock.MagicMock()
+ fake_vm_path = 'fake vm path'
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+ _conn = self._vmutils._conn.Msvm_VirtualSystemGlobalSettingData
+
+ _conn.new.return_value = mock_vs_gs_data
+ mock_vs_man_svc.DefineVirtualSystem.return_value = (fake_vm_path,
+ fake_job_path,
+ fake_ret_val)
+
+ response = self._vmutils._create_vm_obj(vs_man_svc=mock_vs_man_svc,
+ vm_name='fake vm',
+ notes='fake notes',
+ dynamic_memory_ratio=1.0)
+
+ _conn.new.assert_called_once_with()
+ self.assertEqual(mock_vs_gs_data.ElementName, 'fake vm')
+ mock_vs_man_svc.DefineVirtualSystem.assert_called_once_with(
+ [], None, mock_vs_gs_data.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ mock_get_wmi_obj.assert_called_with(fake_vm_path)
+ mock_get_vm_setting_data.assert_called_once_with(mock_get_wmi_obj())
+ mock_modify_virtual_system.assert_called_once_with(
+ mock_vs_man_svc, fake_vm_path, mock_get_vm_setting_data())
+
+ self.assertEqual(mock_get_vm_setting_data().Notes,
+ '\n'.join('fake notes'))
+ self.assertEqual(response, mock_get_wmi_obj())
+
+ def test_list_instances(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instances()
+
+ self.assertEqual([(attrs['ElementName'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName'],
+ SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
diff --git a/nova/tests/unit/virt/hyperv/test_vmutilsv2.py b/nova/tests/unit/virt/hyperv/test_vmutilsv2.py
new file mode 100644
index 0000000000..e4c24683eb
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmutilsv2.py
@@ -0,0 +1,197 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.hyperv import test_vmutils
+from nova.virt.hyperv import vmutilsv2
+
+
+class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
+ """Unit tests for the Hyper-V VMUtilsV2 class."""
+
+ _DEFINE_SYSTEM = 'DefineSystem'
+ _DESTROY_SYSTEM = 'DestroySystem'
+ _DESTROY_SNAPSHOT = 'DestroySnapshot'
+
+ _ADD_RESOURCE = 'AddResourceSettings'
+ _REMOVE_RESOURCE = 'RemoveResourceSettings'
+ _SETTING_TYPE = 'VirtualSystemType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
+
+ def setUp(self):
+ super(VMUtilsV2TestCase, self).setUp()
+ self._vmutils = vmutilsv2.VMUtilsV2()
+ self._vmutils._conn = mock.MagicMock()
+
+ def test_modify_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+
+ mock_svc.ModifyResourceSettings.assert_called_with(
+ ResourceSettings=[self._FAKE_RES_DATA])
+
+ @mock.patch.object(vmutilsv2, 'wmi', create=True)
+ @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
+ self._lookup_vm()
+
+ mock_svc = self._get_snapshot_service()
+ mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+
+ self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
+
+ mock_svc.CreateSnapshot.assert_called_with(
+ AffectedSystem=self._FAKE_VM_PATH,
+ SnapshotType=self._vmutils._SNAPSHOT_FULL)
+
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
+
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
+ mock_add_virt_res):
+ self._lookup_vm()
+ fake_eth_port = mock_get_new_sd.return_value
+
+ self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
+ mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
+ self._lookup_vm()
+ mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
+
+ metric_def = mock.MagicMock()
+ mock_disk = mock.MagicMock()
+ mock_disk.path_.return_value = self._FAKE_RES_PATH
+ mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
+
+ fake_metric_def_paths = ["fake_0", None]
+ fake_metric_resource_paths = [self._FAKE_VM_PATH, self._FAKE_RES_PATH]
+
+ metric_def.path_.side_effect = fake_metric_def_paths
+ self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
+ metric_def]
+
+ self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
+
+ calls = []
+ for i in range(len(fake_metric_def_paths)):
+ calls.append(mock.call(
+ Subject=fake_metric_resource_paths[i],
+ Definition=fake_metric_def_paths[i],
+ MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
+
+ mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH])
+
+ def test_list_instance_notes(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name',
+ 'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instance_notes()
+
+ self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName', 'Notes'],
+ VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
+
+ @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
+ @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
+ def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
+ vm_path, dynamic_memory_ratio=1.0):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vs_data = mock.MagicMock()
+ mock_job = mock.MagicMock()
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+ _conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
+
+ mock_check_ret_val.return_value = mock_job
+ _conn.new.return_value = mock_vs_data
+ mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
+ vm_path,
+ fake_ret_val)
+ mock_job.associators.return_value = ['fake vm path']
+
+ response = self._vmutils._create_vm_obj(
+ vs_man_svc=mock_vs_man_svc,
+ vm_name='fake vm',
+ notes='fake notes',
+ dynamic_memory_ratio=dynamic_memory_ratio)
+
+ if not vm_path:
+ mock_job.associators.assert_called_once_with(
+ self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
+
+ _conn.new.assert_called_once_with()
+ self.assertEqual(mock_vs_data.ElementName, 'fake vm')
+ mock_vs_man_svc.DefineSystem.assert_called_once_with(
+ ResourceSettings=[], ReferenceConfiguration=None,
+ SystemSettings=mock_vs_data.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ if dynamic_memory_ratio > 1:
+ self.assertFalse(mock_vs_data.VirtualNumaEnabled)
+
+ mock_get_wmi_obj.assert_called_with('fake vm path')
+
+ self.assertEqual(mock_vs_data.Notes, 'fake notes')
+ self.assertEqual(response, mock_get_wmi_obj())
+
+ def test_create_vm_obj(self):
+ self._test_create_vm_obj(vm_path='fake vm path')
+
+ def test_create_vm_obj_no_vm_path(self):
+ self._test_create_vm_obj(vm_path=None)
+
+ def test_create_vm_obj_dynamic_memory(self):
+ self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
+
+ def test_list_instances(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instances()
+
+ self.assertEqual([(attrs['ElementName'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName'],
+ VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeutils.py b/nova/tests/unit/virt/hyperv/test_volumeutils.py
new file mode 100644
index 0000000000..98ffcce533
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_volumeutils.py
@@ -0,0 +1,151 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.tests.unit.virt.hyperv import test_basevolumeutils
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutils
+
+CONF = cfg.CONF
+CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
+ 'hyperv')
+
+
+class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase):
+ """Unit tests for the Hyper-V VolumeUtils class."""
+
+ _FAKE_PORTAL_ADDR = '10.1.1.1'
+ _FAKE_PORTAL_PORT = '3260'
+ _FAKE_LUN = 0
+ _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
+
+ _FAKE_STDOUT_VALUE = 'The operation completed successfully'
+
+ def setUp(self):
+ super(VolumeUtilsTestCase, self).setUp()
+ self._volutils = volumeutils.VolumeUtils()
+ self._volutils._conn_wmi = mock.MagicMock()
+ self._volutils._conn_cimv2 = mock.MagicMock()
+ self.flags(volume_attach_retry_count=4, group='hyperv')
+ self.flags(volume_attach_retry_interval=0, group='hyperv')
+
+ def _test_login_target_portal(self, portal_connected):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+
+ self._volutils.execute = mock.MagicMock()
+ if portal_connected:
+ exec_output = 'Address and Socket: %s %s' % (
+ self._FAKE_PORTAL_ADDR, self._FAKE_PORTAL_PORT)
+ else:
+ exec_output = ''
+
+ self._volutils.execute.return_value = exec_output
+
+ self._volutils._login_target_portal(fake_portal)
+
+ call_list = self._volutils.execute.call_args_list
+ all_call_args = [arg for call in call_list for arg in call[0]]
+
+ if portal_connected:
+ self.assertIn('RefreshTargetPortal', all_call_args)
+ else:
+ self.assertIn('AddTargetPortal', all_call_args)
+
+ def test_login_connected_portal(self):
+ self._test_login_target_portal(True)
+
+ def test_login_new_portal(self):
+ self._test_login_target_portal(False)
+
+ def _test_login_target(self, target_connected, raise_exception=False):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+ self._volutils.execute = mock.MagicMock()
+ self._volutils._login_target_portal = mock.MagicMock()
+
+ if target_connected:
+ self._volutils.execute.return_value = self._FAKE_TARGET
+ elif raise_exception:
+ self._volutils.execute.return_value = ''
+ else:
+ self._volutils.execute.side_effect = (
+ ['', '', '', self._FAKE_TARGET, ''])
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutils.login_storage_target,
+ self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
+ else:
+ self._volutils.login_storage_target(self._FAKE_LUN,
+ self._FAKE_TARGET,
+ fake_portal)
+
+ call_list = self._volutils.execute.call_args_list
+ all_call_args = [arg for call in call_list for arg in call[0]]
+
+ if target_connected:
+ self.assertNotIn('qlogintarget', all_call_args)
+ else:
+ self.assertIn('qlogintarget', all_call_args)
+
+ def test_login_connected_target(self):
+ self._test_login_target(True)
+
+ def test_login_disconnected_target(self):
+ self._test_login_target(False)
+
+ def test_login_target_exception(self):
+ self._test_login_target(False, True)
+
+ def _test_execute_wrapper(self, raise_exception):
+ fake_cmd = ('iscsicli.exe', 'ListTargetPortals')
+
+ if raise_exception:
+ output = 'fake error'
+ else:
+ output = 'The operation completed successfully'
+
+ with mock.patch('nova.utils.execute') as fake_execute:
+ fake_execute.return_value = (output, None)
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutils.execute,
+ *fake_cmd)
+ else:
+ ret_val = self._volutils.execute(*fake_cmd)
+ self.assertEqual(output, ret_val)
+
+ def test_execute_raise_exception(self):
+ self._test_execute_wrapper(True)
+
+ def test_execute_exception(self):
+ self._test_execute_wrapper(False)
+
+ @mock.patch.object(volumeutils, 'utils')
+ def test_logout_storage_target(self, mock_utils):
+ mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
+ mock.sentinel.FAKE_STDERR_VALUE)
+ session = mock.MagicMock()
+ session.SessionId = mock.sentinel.FAKE_SESSION_ID
+ self._volutils._conn_wmi.query.return_value = [session]
+
+ self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN)
+ mock_utils.execute.assert_called_once_with(
+ 'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py b/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py
new file mode 100644
index 0000000000..1c242b71f8
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py
@@ -0,0 +1,147 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutilsv2
+
+CONF = cfg.CONF
+CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
+ 'hyperv')
+
+
+class VolumeUtilsV2TestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VolumeUtilsV2 class."""
+
+ _FAKE_PORTAL_ADDR = '10.1.1.1'
+ _FAKE_PORTAL_PORT = '3260'
+ _FAKE_LUN = 0
+ _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
+
+ def setUp(self):
+ super(VolumeUtilsV2TestCase, self).setUp()
+ self._volutilsv2 = volumeutilsv2.VolumeUtilsV2()
+ self._volutilsv2._conn_storage = mock.MagicMock()
+ self._volutilsv2._conn_wmi = mock.MagicMock()
+ self.flags(volume_attach_retry_count=4, group='hyperv')
+ self.flags(volume_attach_retry_interval=0, group='hyperv')
+
+ def _test_login_target_portal(self, portal_connected):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+ fake_portal_object = mock.MagicMock()
+ _query = self._volutilsv2._conn_storage.query
+ self._volutilsv2._conn_storage.MSFT_iSCSITargetPortal = (
+ fake_portal_object)
+
+ if portal_connected:
+ _query.return_value = [fake_portal_object]
+ else:
+ _query.return_value = None
+
+ self._volutilsv2._login_target_portal(fake_portal)
+
+ if portal_connected:
+ fake_portal_object.Update.assert_called_once_with()
+ else:
+ fake_portal_object.New.assert_called_once_with(
+ TargetPortalAddress=self._FAKE_PORTAL_ADDR,
+ TargetPortalPortNumber=self._FAKE_PORTAL_PORT)
+
+ def test_login_connected_portal(self):
+ self._test_login_target_portal(True)
+
+ def test_login_new_portal(self):
+ self._test_login_target_portal(False)
+
+ def _test_login_target(self, target_connected, raise_exception=False):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+
+ fake_target_object = mock.MagicMock()
+
+ if target_connected:
+ fake_target_object.IsConnected = True
+ elif not raise_exception:
+ type(fake_target_object).IsConnected = mock.PropertyMock(
+ side_effect=[False, True])
+ else:
+ fake_target_object.IsConnected = False
+
+ _query = self._volutilsv2._conn_storage.query
+ _query.return_value = [fake_target_object]
+
+ self._volutilsv2._conn_storage.MSFT_iSCSITarget = (
+ fake_target_object)
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutilsv2.login_storage_target,
+ self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
+ else:
+ self._volutilsv2.login_storage_target(self._FAKE_LUN,
+ self._FAKE_TARGET,
+ fake_portal)
+
+ if target_connected:
+ fake_target_object.Update.assert_called_with()
+ else:
+ fake_target_object.Connect.assert_called_once_with(
+ IsPersistent=True, NodeAddress=self._FAKE_TARGET)
+
+ def test_login_connected_target(self):
+ self._test_login_target(True)
+
+ def test_login_disconnected_target(self):
+ self._test_login_target(False)
+
+ def test_login_target_exception(self):
+ self._test_login_target(False, True)
+
+ def test_logout_storage_target(self):
+ mock_msft_target = self._volutilsv2._conn_storage.MSFT_iSCSITarget
+ mock_msft_session = self._volutilsv2._conn_storage.MSFT_iSCSISession
+
+ mock_target = mock.MagicMock()
+ mock_target.IsConnected = True
+ mock_msft_target.return_value = [mock_target]
+
+ mock_session = mock.MagicMock()
+ mock_session.IsPersistent = True
+ mock_msft_session.return_value = [mock_session]
+
+ self._volutilsv2.logout_storage_target(self._FAKE_TARGET)
+
+ mock_msft_target.assert_called_once_with(NodeAddress=self._FAKE_TARGET)
+ mock_msft_session.assert_called_once_with(
+ TargetNodeAddress=self._FAKE_TARGET)
+
+ mock_session.Unregister.assert_called_once_with()
+ mock_target.Disconnect.assert_called_once_with()
+
+ @mock.patch.object(volumeutilsv2.VolumeUtilsV2, 'logout_storage_target')
+ def test_execute_log_out(self, mock_logout_target):
+ sess_class = self._volutilsv2._conn_wmi.MSiSCSIInitiator_SessionClass
+
+ mock_session = mock.MagicMock()
+ sess_class.return_value = [mock_session]
+
+ self._volutilsv2.execute_log_out(mock.sentinel.FAKE_SESSION_ID)
+
+ sess_class.assert_called_once_with(
+ SessionId=mock.sentinel.FAKE_SESSION_ID)
+ mock_logout_target.assert_called_once_with(mock_session.TargetName)
diff --git a/nova/tests/unit/virt/ironic/__init__.py b/nova/tests/unit/virt/ironic/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/__init__.py
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
new file mode 100644
index 0000000000..025d2616dd
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -0,0 +1,126 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironicclient import client as ironic_client
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt.ironic import client_wrapper
+
+CONF = cfg.CONF
+
+FAKE_CLIENT = ironic_utils.FakeClient()
+
+
+class IronicClientWrapperTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicClientWrapperTestCase, self).setUp()
+ self.ironicclient = client_wrapper.IronicClientWrapper()
+ # Do not waste time sleeping
+ cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
+ mock_get_client.return_value = FAKE_CLIENT
+ self.ironicclient.call("node.list")
+ mock_get_client.assert_called_once_with()
+ mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
+ mock_multi_getattr.return_value.assert_called_once_with()
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
+ mock_get_client.return_value = FAKE_CLIENT
+ self.ironicclient.call("node.list", 'test', associated=True)
+ mock_get_client.assert_called_once_with()
+ mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
+ mock_multi_getattr.return_value.assert_called_once_with(
+ 'test', associated=True)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_no_auth_token(self, mock_ir_cli):
+ self.flags(admin_auth_token=None, group='ironic')
+ ironicclient = client_wrapper.IronicClientWrapper()
+ # dummy call to have _get_client() called
+ ironicclient.call("node.list")
+ expected = {'os_username': CONF.ironic.admin_username,
+ 'os_password': CONF.ironic.admin_password,
+ 'os_auth_url': CONF.ironic.admin_url,
+ 'os_tenant_name': CONF.ironic.admin_tenant_name,
+ 'os_service_type': 'baremetal',
+ 'os_endpoint_type': 'public',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_with_auth_token(self, mock_ir_cli):
+ self.flags(admin_auth_token='fake-token', group='ironic')
+ ironicclient = client_wrapper.IronicClientWrapper()
+ # dummy call to have _get_client() called
+ ironicclient.call("node.list")
+ expected = {'os_auth_token': 'fake-token',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_fail(self, mock_get_client, mock_multi_getattr):
+ cfg.CONF.set_override('api_max_retries', 2, 'ironic')
+ test_obj = mock.Mock()
+ test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
+ mock_multi_getattr.return_value = test_obj
+ mock_get_client.return_value = FAKE_CLIENT
+ self.assertRaises(exception.NovaException, self.ironicclient.call,
+ "node.list")
+ self.assertEqual(2, test_obj.call_count)
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_fail_unexpected_exception(self, mock_get_client,
+ mock_multi_getattr):
+ test_obj = mock.Mock()
+ test_obj.side_effect = ironic_exception.HTTPNotFound
+ mock_multi_getattr.return_value = test_obj
+ mock_get_client.return_value = FAKE_CLIENT
+ self.assertRaises(ironic_exception.HTTPNotFound,
+ self.ironicclient.call, "node.list")
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_unauthorized(self, mock_get_client):
+ mock_get_client.side_effect = ironic_exception.Unauthorized
+ self.assertRaises(exception.NovaException,
+ self.ironicclient._get_client)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_unexpected_exception(self, mock_get_client):
+ mock_get_client.side_effect = ironic_exception.ConnectionRefused
+ self.assertRaises(ironic_exception.ConnectionRefused,
+ self.ironicclient._get_client)
+
+ def test__multi_getattr_good(self):
+ response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list")
+ self.assertEqual(FAKE_CLIENT.node.list, response)
+
+ def test__multi_getattr_fail(self):
+ self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
+ FAKE_CLIENT, "nonexistent")
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
new file mode 100644
index 0000000000..0e24c7bab4
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -0,0 +1,1268 @@
+# Copyright 2014 Red Hat, Inc.
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the ironic driver."""
+
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import power_state as nova_states
+from nova.compute import task_states
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.openstack.common import loopingcall
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import utils
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import firewall
+from nova.virt.ironic import client_wrapper as cw
+from nova.virt.ironic import driver as ironic_driver
+from nova.virt.ironic import ironic_states
+
+
+CONF = cfg.CONF
+
+IRONIC_FLAGS = dict(
+ api_version=1,
+ group='ironic',
+)
+
+FAKE_CLIENT = ironic_utils.FakeClient()
+
+
+class FakeClientWrapper(cw.IronicClientWrapper):
+ def _get_client(self):
+ return FAKE_CLIENT
+
+
+class FakeLoopingCall(object):
+ def __init__(self):
+ self.wait = mock.MagicMock()
+ self.start = mock.MagicMock()
+ self.start.return_value = self
+
+
+def _get_properties():
+ return {'cpus': 2,
+ 'memory_mb': 512,
+ 'local_gb': 10,
+ 'cpu_arch': 'x86_64'}
+
+
+def _get_stats():
+ return {'cpu_arch': 'x86_64'}
+
+
+FAKE_CLIENT_WRAPPER = FakeClientWrapper()
+
+
+@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
+class IronicDriverTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicDriverTestCase, self).setUp()
+ self.flags(**IRONIC_FLAGS)
+ self.driver = ironic_driver.IronicDriver(None)
+ self.driver.virtapi = fake.FakeVirtAPI()
+ self.ctx = nova_context.get_admin_context()
+
+ # mock retries configs to avoid sleeps and make tests run quicker
+ CONF.set_default('api_max_retries', default=1, group='ironic')
+ CONF.set_default('api_retry_interval', default=0, group='ironic')
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
+
+ def test_validate_driver_loading(self):
+ self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
+
+ def test__get_hypervisor_type(self):
+ self.assertEqual('ironic', self.driver._get_hypervisor_type())
+
+ def test__get_hypervisor_version(self):
+ self.assertEqual(1, self.driver._get_hypervisor_version())
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test__validate_instance_and_node(self, mock_gbiui):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ instance_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ ironicclient = cw.IronicClientWrapper()
+
+ mock_gbiui.return_value = node
+ result = ironic_driver._validate_instance_and_node(ironicclient,
+ instance)
+ self.assertEqual(result.uuid, node_uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test__validate_instance_and_node_failed(self, mock_gbiui):
+ ironicclient = cw.IronicClientWrapper()
+ mock_gbiui.side_effect = ironic_exception.NotFound()
+ instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertRaises(exception.InstanceNotFound,
+ ironic_driver._validate_instance_and_node,
+ ironicclient, instance)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_pass(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.DEPLOYING)
+
+ fake_validate.return_value = node
+ self.driver._wait_for_active(FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_done(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.ACTIVE)
+
+ fake_validate.return_value = node
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.driver._wait_for_active,
+ FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_fail(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.DEPLOYFAIL)
+
+ fake_validate.return_value = node
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver._wait_for_active,
+ FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_power_state_pass(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ target_power_state=ironic_states.POWER_OFF)
+
+ fake_validate.return_value = node
+ self.driver._wait_for_power_state(
+ FAKE_CLIENT, instance, 'fake message')
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_power_state_ok(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ target_power_state=ironic_states.NOSTATE)
+
+ fake_validate.return_value = node
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.driver._wait_for_power_state,
+ FAKE_CLIENT, instance, 'fake message')
+ self.assertTrue(fake_validate.called)
+
+ def test__node_resource(self):
+ node_uuid = uuidutils.generate_uuid()
+ instance_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(props['cpus'], result['vcpus'])
+ self.assertEqual(props['cpus'], result['vcpus_used'])
+ self.assertEqual(props['memory_mb'], result['memory_mb'])
+ self.assertEqual(props['memory_mb'], result['memory_mb_used'])
+ self.assertEqual(props['local_gb'], result['local_gb'])
+ self.assertEqual(props['local_gb'], result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ def test__node_resource_canonicalizes_arch(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ props['cpu_arch'] = 'i386'
+ node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual('i686',
+ jsonutils.loads(result['supported_instances'])[0][0])
+ self.assertEqual('i386',
+ jsonutils.loads(result['stats'])['cpu_arch'])
+
+ def test__node_resource_unknown_arch(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ del props['cpu_arch']
+ node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual([], jsonutils.loads(result['supported_instances']))
+
+ def test__node_resource_exposes_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = 'test:capability'
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ stats = jsonutils.loads(result['stats'])
+ self.assertIsNone(stats.get('capabilities'))
+ self.assertEqual('capability', stats.get('test'))
+
+ def test__node_resource_no_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = None
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
+
+ def test__node_resource_malformed_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = 'test:capability,:no_key,no_val:'
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ stats = jsonutils.loads(result['stats'])
+ self.assertEqual('capability', stats.get('test'))
+
+ def test__node_resource_no_instance_uuid(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ power_state=ironic_states.POWER_OFF,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(props['cpus'], result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(props['memory_mb'], result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(props['local_gb'], result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable')
+ def test__node_resource_unavailable_node_res(self, mock_res_unavail):
+ mock_res_unavail.return_value = True
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(0, result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(0, result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(0, result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
+ create=True)
+ def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._start_firewall(fake_inst, fake_net_info)
+
+ mock_aif.assert_called_once_with(fake_inst, fake_net_info)
+ mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
+ mock_pif.assert_called_once_with(fake_inst, fake_net_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
+ create=True)
+ def test__stop_firewall(self, mock_ui):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._stop_firewall(fake_inst, fake_net_info)
+ mock_ui.assert_called_once_with(fake_inst, fake_net_info)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_instance_exists(self, mock_call):
+ instance_uuid = 'fake-uuid'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertTrue(self.driver.instance_exists(instance))
+ mock_call.assert_called_once_with('node.get_by_instance_uuid',
+ instance_uuid)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_instance_exists_fail(self, mock_call):
+ mock_call.side_effect = ironic_exception.NotFound
+ instance_uuid = 'fake-uuid'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertFalse(self.driver.instance_exists(instance))
+ mock_call.assert_called_once_with('node.get_by_instance_uuid',
+ instance_uuid)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ @mock.patch.object(objects.Instance, 'get_by_uuid')
+ def test_list_instances(self, mock_inst_by_uuid, mock_call):
+ nodes = []
+ instances = []
+ for i in range(2):
+ uuid = uuidutils.generate_uuid()
+ instances.append(fake_instance.fake_instance_obj(self.ctx,
+ id=i,
+ uuid=uuid))
+ nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
+
+ mock_inst_by_uuid.side_effect = instances
+ mock_call.return_value = nodes
+
+ response = self.driver.list_instances()
+ mock_call.assert_called_with("node.list", associated=True, limit=0)
+ expected_calls = [mock.call(mock.ANY, instances[0].uuid),
+ mock.call(mock.ANY, instances[1].uuid)]
+ mock_inst_by_uuid.assert_has_calls(expected_calls)
+ self.assertEqual(['instance-00000000', 'instance-00000001'],
+ sorted(response))
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_list_instance_uuids(self, mock_call):
+ num_nodes = 2
+ nodes = []
+ for n in range(num_nodes):
+ nodes.append(ironic_utils.get_test_node(
+ instance_uuid=uuidutils.generate_uuid()))
+
+ mock_call.return_value = nodes
+ uuids = self.driver.list_instance_uuids()
+ mock_call.assert_called_with('node.list', associated=True, limit=0)
+ expected = [n.instance_uuid for n in nodes]
+ self.assertEqual(sorted(expected), sorted(uuids))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_empty_cache_empty_list(self, mock_get,
+ mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = []
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ mock_get.assert_called_with(node.uuid)
+ mock_list.assert_called_with(detail=True, limit=0)
+
+ mock_get.side_effect = ironic_exception.NotFound
+ self.assertFalse(self.driver.node_is_available(node.uuid))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_empty_cache(self, mock_get, mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = [node]
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ mock_list.assert_called_with(detail=True, limit=0)
+ self.assertEqual(0, mock_get.call_count)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_with_cache(self, mock_get, mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = [node]
+ # populate the cache
+ self.driver.get_available_nodes(refresh=True)
+ # prove that zero calls are made after populating cache
+ mock_list.reset_mock()
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ self.assertEqual(0, mock_list.call_count)
+ self.assertEqual(0, mock_get.call_count)
+
+ def test__node_resources_unavailable(self):
+ node_dicts = [
+ # a node in maintenance /w no instance and power OFF
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.POWER_OFF},
+ # a node in maintenance /w no instance and ERROR power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.ERROR},
+ # a node not in maintenance /w no instance and bad power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.NOSTATE},
+ ]
+ for n in node_dicts:
+ node = ironic_utils.get_test_node(**n)
+ self.assertTrue(self.driver._node_resources_unavailable(node))
+
+ avail_node = ironic_utils.get_test_node(
+ power_state=ironic_states.POWER_OFF)
+ self.assertFalse(self.driver._node_resources_unavailable(avail_node))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ def test_get_available_nodes(self, mock_list):
+ node_dicts = [
+ # a node in maintenance /w no instance and power OFF
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.POWER_OFF},
+ # a node /w instance and power ON
+ {'uuid': uuidutils.generate_uuid(),
+ 'instance_uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.POWER_ON},
+ # a node not in maintenance /w no instance and bad power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.ERROR},
+ ]
+ nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
+ mock_list.return_value = nodes
+ available_nodes = self.driver.get_available_nodes()
+ expected_uuids = [n['uuid'] for n in node_dicts]
+ self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ def test_get_available_resource(self, mock_nr, mock_list, mock_get):
+ node = ironic_utils.get_test_node()
+ node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
+ fake_resource = 'fake-resource'
+ mock_get.return_value = node
+ # ensure cache gets populated without the node we want
+ mock_list.return_value = [node_2]
+ mock_nr.return_value = fake_resource
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ mock_nr.assert_called_once_with(node)
+ mock_get.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ def test_get_available_resource_with_cache(self, mock_nr, mock_list,
+ mock_get):
+ node = ironic_utils.get_test_node()
+ fake_resource = 'fake-resource'
+ mock_list.return_value = [node]
+ mock_nr.return_value = fake_resource
+ # populate the cache
+ self.driver.get_available_nodes(refresh=True)
+ mock_list.reset_mock()
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ self.assertEqual(0, mock_list.call_count)
+ self.assertEqual(0, mock_get.call_count)
+ mock_nr.assert_called_once_with(node)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test_get_info(self, mock_gbiu):
+ instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ properties = {'memory_mb': 512, 'cpus': 2}
+ power_state = ironic_states.POWER_ON
+ node = ironic_utils.get_test_node(instance_uuid=instance_uuid,
+ properties=properties,
+ power_state=power_state)
+
+ mock_gbiu.return_value = node
+
+ # ironic_states.POWER_ON should be mapped to
+ # nova_states.RUNNING
+ memory_kib = properties['memory_mb'] * 1024
+ expected = {'state': nova_states.RUNNING,
+ 'max_mem': memory_kib,
+ 'mem': memory_kib,
+ 'num_cpu': properties['cpus'],
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj('fake-context',
+ uuid=instance_uuid)
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test_get_info_http_not_found(self, mock_gbiu):
+ mock_gbiu.side_effect = ironic_exception.NotFound()
+
+ expected = {'state': nova_states.NOSTATE,
+ 'max_mem': 0,
+ 'mem': 0,
+ 'num_cpu': 0,
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=uuidutils.generate_uuid())
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_macs_for_instance(self, mock_node):
+ node = ironic_utils.get_test_node()
+ port = ironic_utils.get_test_port()
+ mock_node.get.return_value = node
+ mock_node.list_ports.return_value = [port]
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ result = self.driver.macs_for_instance(instance)
+ self.assertEqual(set([port.address]), result)
+ mock_node.list_ports.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_macs_for_instance_http_not_found(self, mock_get):
+ mock_get.side_effect = ironic_exception.NotFound()
+
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=uuidutils.generate_uuid())
+ result = self.driver.macs_for_instance(instance)
+ self.assertIsNone(result)
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ def test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
+ mock_fg_bid, mock_node, mock_looping, mock_save):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = {'ephemeral_gb': 0}
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.set_provision_state.return_value = mock.MagicMock()
+ mock_fg_bid.return_value = fake_flavor
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ self.driver.spawn(self.ctx, instance, None, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_fg_bid.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
+ mock_pvifs.assert_called_once_with(node, instance, None)
+ mock_sf.assert_called_once_with(instance, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'active')
+
+ self.assertIsNone(instance['default_ephemeral_device'])
+ self.assertFalse(mock_save.called)
+
+ mock_looping.assert_called_once_with(mock_wait_active,
+ FAKE_CLIENT_WRAPPER,
+ instance)
+ fake_looping_call.start.assert_called_once_with(
+ interval=CONF.ironic.api_retry_interval)
+ fake_looping_call.wait.assert_called_once_with()
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
+ mock_wait_active, mock_destroy,
+ mock_fg_bid, mock_node,
+ mock_looping):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = {'ephemeral_gb': 0}
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.set_provision_state.return_value = mock.MagicMock()
+ mock_fg_bid.return_value = fake_flavor
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ deploy_exc = exception.InstanceDeployFailure('foo')
+ fake_looping_call.wait.side_effect = deploy_exc
+ self.assertRaises(
+ exception.InstanceDeployFailure,
+ self.driver.spawn, self.ctx, instance, None, [], None)
+ mock_destroy.assert_called_once_with(self.ctx, instance, None)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__add_driver_fields_good(self, mock_update):
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor = ironic_utils.get_test_flavor()
+ self.driver._add_driver_fields(node, instance, image_meta, flavor)
+ expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
+ 'value': image_meta['id']},
+ {'path': '/instance_info/root_gb', 'op': 'add',
+ 'value': str(instance.root_gb)},
+ {'path': '/instance_info/swap_mb', 'op': 'add',
+ 'value': str(flavor['swap'])},
+ {'path': '/instance_uuid', 'op': 'add',
+ 'value': instance.uuid}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__add_driver_fields_fail(self, mock_update):
+ mock_update.side_effect = ironic_exception.BadRequest()
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor = ironic_utils.get_test_flavor()
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver._add_driver_fields,
+ node, instance, image_meta, flavor)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_good_with_flavor(self, mock_update):
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ flavor = ironic_utils.get_test_flavor(extra_specs={})
+ self.driver._cleanup_deploy(self.ctx, node, instance, None,
+ flavor=flavor)
+ expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_without_flavor(self, mock_update, mock_flavor):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.driver._cleanup_deploy(self.ctx, node, instance, None)
+ expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_fail(self, mock_update, mock_flavor):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
+ mock_update.side_effect = ironic_exception.BadRequest()
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.assertRaises(exception.InstanceTerminationFailure,
+ self.driver._cleanup_deploy,
+ self.ctx, node, instance, None)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_spawn_node_driver_validation_fail(self, mock_flavor, mock_node):
+ mock_flavor.return_value = ironic_utils.get_test_flavor()
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_node.validate.return_value = ironic_utils.get_test_validation(
+ power=False, deploy=False)
+ mock_node.get.return_value = node
+ image_meta = ironic_utils.get_test_image_meta()
+
+ self.assertRaises(exception.ValidationError, self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_with(mock.ANY, instance['instance_type_id'])
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ flavor = ironic_utils.get_test_flavor()
+ mock_flavor.return_value = flavor
+ image_meta = ironic_utils.get_test_image_meta()
+
+ class TestException(Exception):
+ pass
+
+ mock_sf.side_effect = TestException()
+ self.assertRaises(TestException, self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
+ flavor=flavor)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ flavor = ironic_utils.get_test_flavor()
+ mock_flavor.return_value = flavor
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+
+ mock_node.set_provision_state.side_effect = exception.NovaException()
+ self.assertRaises(exception.NovaException, self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
+ instance, None,
+ flavor=flavor)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ flavor = ironic_utils.get_test_flavor()
+ mock_flavor.return_value = flavor
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
+ self.assertRaises(ironic_exception.BadRequest,
+ self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
+ instance, None,
+ flavor=flavor)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
+ def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node,
+ mock_looping):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ fake_net_info = utils.get_test_network_info()
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ mock_flavor.return_value = ironic_utils.get_test_flavor()
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ fake_looping_call.wait.side_effect = ironic_exception.BadRequest
+ fake_net_info = utils.get_test_network_info()
+ self.assertRaises(ironic_exception.BadRequest,
+ self.driver.spawn, self.ctx, instance,
+ image_meta, [], None, fake_net_info)
+ mock_destroy.assert_called_once_with(self.ctx, instance,
+ fake_net_info)
+
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
                                                 mock_wait, mock_flavor,
                                                 mock_node, mock_save,
                                                 mock_looping):
        # A flavor with ephemeral_gb set should make spawn() record
        # '/dev/sda1' as the default ephemeral device and save the instance.
        mock_flavor.return_value = ironic_utils.get_test_flavor(ephemeral_gb=1)
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        image_meta = ironic_utils.get_test_image_meta()

        self.driver.spawn(self.ctx, instance, image_meta, [], None)
        mock_flavor.assert_called_once_with(self.ctx,
                                            instance['instance_type_id'])
        self.assertTrue(mock_save.called)
        self.assertEqual('/dev/sda1', instance['default_ephemeral_device'])
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy(self, mock_cleanup_deploy, mock_node):
        # destroy() on an ACTIVE node requests the 'deleted' provision
        # state and then cleans up the deploy artifacts.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'

        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        def fake_set_provision_state(*_):
            # Simulate the node being successfully unprovisioned.
            node.provision_state = None

        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.side_effect = fake_set_provision_state
        self.driver.destroy(self.ctx, instance, network_info, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node,
                                               instance, network_info)
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
                                             mock_node):
        # A node already DELETING must not get another provision-state
        # request, but cleanup still runs.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'

        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        mock_node.get_by_instance_uuid.return_value = node
        self.driver.destroy(self.ctx, instance, network_info, None)
        self.assertFalse(mock_node.set_provision_state.called)
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
                                               network_info)
+
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
        # An error from the undeploy request itself propagates to the caller.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        fake_validate.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        mock_sps.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_destroy_unprovision_fail(self, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ def fake_set_provision_state(*_):
+ node.provision_state = ironic_states.ERROR
+
+ mock_node.get_by_instance_uuid.return_value = node
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'deleted')
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_destroy_unassociate_fail(self, mock_node):
        # A failure while clearing the instance_uuid from the node
        # (node.update) must propagate after the undeploy request.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        mock_node.get_by_instance_uuid.return_value = node
        mock_node.update.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_reboot(self, mock_sp, fake_validate, mock_looping):
        # reboot() requests the 'reboot' power state exactly once.
        node = ironic_utils.get_test_node()
        fake_validate.side_effect = [node, node]

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        self.driver.reboot(self.ctx, instance, None, None)
        mock_sp.assert_called_once_with(node.uuid, 'reboot')
+
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_power_off(self, mock_sp, fake_validate, mock_looping):
        # power_off() resolves the node via _validate_instance_and_node
        # and requests the 'off' power state once.
        node = ironic_utils.get_test_node()
        fake_validate.side_effect = [node, node]

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance_uuid = uuidutils.generate_uuid()
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=instance_uuid)

        self.driver.power_off(instance)
        mock_sp.assert_called_once_with(node.uuid, 'off')
+
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_power_on(self, mock_sp, fake_validate, mock_looping):
        # power_on() mirrors power_off() but requests the 'on' state.
        node = ironic_utils.get_test_node()
        fake_validate.side_effect = [node, node]

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance_uuid = uuidutils.generate_uuid()
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=instance_uuid)

        self.driver.power_on(self.ctx, instance,
                             utils.get_test_network_info())
        mock_sp.assert_called_once_with(node.uuid, 'on')
+
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
        # _plug_vifs() records the Neutron port id on the Ironic port
        # via a JSON-patch 'add' of /extra/vif_port_id.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()

        mock_lp.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()

        # NOTE: unicode() is Python 2 only.
        port_id = unicode(network_info[0]['id'])
        expected_patch = [{'op': 'add',
                           'path': '/extra/vif_port_id',
                           'value': port_id}]
        self.driver._plug_vifs(node, instance, network_info)

        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        mock_port_udt.assert_called_with(port.uuid, expected_patch)
+
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    def test_plug_vifs(self, mock__plug_vifs, mock_get):
        # Public plug_vifs() looks the node up and delegates to _plug_vifs().
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)

        mock_get.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()
        self.driver.plug_vifs(instance, network_info)

        mock_get.assert_called_once_with(node_uuid)
        mock__plug_vifs.assert_called_once_with(node, instance, network_info)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
                                      mock_port_udt):
        # More VIFs than physical ports is an error; no port is patched.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()

        mock_lp.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        # len(network_info) > len(ports)
        network_info = (utils.get_test_network_info() +
                        utils.get_test_network_info())
        self.assertRaises(exception.NovaException,
                          self.driver._plug_vifs, node, instance,
                          network_info)

        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
                                       mock_port_udt):
        # Empty network_info is a no-op for port updates.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()

        mock_lp.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = []
        self.driver._plug_vifs(node, instance, network_info)

        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs(self, mock_node, mock_update):
        # unplug_vifs() removes /extra/vif_port_id from ports that have it.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})

        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        expected_patch = [{'op': 'remove', 'path':
                           '/extra/vif_port_id'}]
        self.driver.unplug_vifs(instance,
                                utils.get_test_network_info())

        # asserts
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        mock_update.assert_called_once_with(port.uuid, expected_patch)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
        # Ports without a vif_port_id in 'extra' are left untouched.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={})

        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        self.driver.unplug_vifs(instance, utils.get_test_network_info())

        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        # assert port.update() was not called
        self.assertFalse(mock_update.called)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    def test_unplug_vifs_no_network_info(self, mock_update):
        # With no network_info there is nothing to unplug.
        instance = fake_instance.fake_instance_obj(self.ctx)
        network_info = []
        self.driver.unplug_vifs(instance, network_info)

        # assert port.update() was not called
        self.assertFalse(mock_update.called)
+
    @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
                       create=True)
    def test_unfilter_instance(self, mock_ui):
        # The driver delegates unfiltering to its firewall driver.
        instance = fake_instance.fake_instance_obj(self.ctx)
        network_info = utils.get_test_network_info()
        self.driver.unfilter_instance(instance, network_info)
        mock_ui.assert_called_once_with(instance, network_info)
+
    @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
                       create=True)
    def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
        # Both basic filtering setup and instance-filter preparation are
        # delegated to the firewall driver.
        instance = fake_instance.fake_instance_obj(self.ctx)
        network_info = utils.get_test_network_info()
        self.driver.ensure_filtering_rules_for_instance(instance,
                                                        network_info)
        mock_sbf.assert_called_once_with(instance, network_info)
        mock_pif.assert_called_once_with(instance, network_info)
+
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_instance_security_rules', create=True)
    def test_refresh_instance_security_rules(self, mock_risr):
        # Instance security-rule refresh is delegated to the firewall driver.
        instance = fake_instance.fake_instance_obj(self.ctx)
        self.driver.refresh_instance_security_rules(instance)
        mock_risr.assert_called_once_with(instance)
+
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_provider_fw_rules', create=True)
    def test_refresh_provider_fw_rules(self, mock_rpfr):
        # NOTE(review): the instance object below is created and discarded;
        # presumably kept for parity with sibling tests — confirm.
        fake_instance.fake_instance_obj(self.ctx)
        self.driver.refresh_provider_fw_rules()
        mock_rpfr.assert_called_once_with()
+
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_security_group_members', create=True)
    def test_refresh_security_group_members(self, mock_rsgm):
        # Security-group membership refresh is delegated to the firewall.
        fake_group = 'fake-security-group-members'
        self.driver.refresh_security_group_members(fake_group)
        mock_rsgm.assert_called_once_with(fake_group)
+
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_instance_security_rules', create=True)
    def test_refresh_security_group_rules(self, mock_risr):
        # NOTE(review): despite its name this test exercises
        # refresh_instance_security_rules (duplicating the test above),
        # not refresh_security_group_rules — looks like a copy/paste slip;
        # confirm against the driver API before changing.
        fake_group = 'fake-security-group-members'
        self.driver.refresh_instance_security_rules(fake_group)
        mock_risr.assert_called_once_with(fake_group)
+
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
                      mock_fg_bid, mock_set_pstate, mock_looping,
                      mock_wait_active, preserve=False):
        """Common rebuild() flow, parameterized on preserve_ephemeral.

        Verifies the instance is saved with the REBUILDING task state,
        the driver fields are re-applied, Ironic is asked for the REBUILD
        provision state, and the driver polls for the node to go active.
        """
        node_uuid = uuidutils.generate_uuid()
        instance_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node

        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = {'id': flavor_id, 'name': 'baremetal'}
        mock_fg_bid.return_value = flavor

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call

        self.driver.rebuild(
            context=self.ctx, instance=instance, image_meta=image_meta,
            injected_files=None, admin_password=None, bdms=None,
            detach_block_devices=None, attach_block_devices=None,
            preserve_ephemeral=preserve)

        mock_save.assert_called_once_with(
            expected_task_state=[task_states.REBUILDING])
        mock_driver_fields.assert_called_once_with(node, instance, image_meta,
                                                   flavor, preserve)
        mock_set_pstate.assert_called_once_with(node_uuid,
                                                ironic_states.REBUILD)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
+
    def test_rebuild_preserve_ephemeral(self):
        # Exercise the shared rebuild flow with preserve_ephemeral=True.
        self._test_rebuild(preserve=True)
+
    def test_rebuild_no_preserve_ephemeral(self):
        # Exercise the shared rebuild flow with preserve_ephemeral=False.
        self._test_rebuild(preserve=False)
+
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
                              mock_fg_bid, mock_set_pstate):
        # Any failure from set_provision_state — Nova's own or either
        # ironic client error — must surface as InstanceDeployFailure.
        node_uuid = uuidutils.generate_uuid()
        instance_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node

        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = {'id': flavor_id, 'name': 'baremetal'}
        mock_fg_bid.return_value = flavor

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)

        exceptions = [
            exception.NovaException(),
            ironic_exception.BadRequest(),
            ironic_exception.InternalServerError(),
        ]
        for e in exceptions:
            mock_set_pstate.side_effect = e
            self.assertRaises(exception.InstanceDeployFailure,
                self.driver.rebuild,
                context=self.ctx, instance=instance, image_meta=image_meta,
                injected_files=None, admin_password=None, bdms=None,
                detach_block_devices=None, attach_block_devices=None)
diff --git a/nova/tests/unit/virt/ironic/test_patcher.py b/nova/tests/unit/virt/ironic/test_patcher.py
new file mode 100644
index 0000000000..a69e8cacfe
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_patcher.py
@@ -0,0 +1,139 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova import context as nova_context
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt.ironic import patcher
+
+CONF = cfg.CONF
+
+
class IronicDriverFieldsTestCase(test.NoDBTestCase):
    """Tests for patcher, which builds JSON patches for Ironic nodes."""

    def setUp(self):
        super(IronicDriverFieldsTestCase, self).setUp()
        self.image_meta = ironic_utils.get_test_image_meta()
        self.flavor = ironic_utils.get_test_flavor()
        self.ctx = nova_context.get_admin_context()
        self.instance = fake_instance.fake_instance_obj(self.ctx)
        # Generic expected patches
        self._expected_deploy_patch = [{'path': '/instance_info/image_source',
                                        'value': self.image_meta['id'],
                                        'op': 'add'},
                                       {'path': '/instance_info/root_gb',
                                        'value': str(self.instance['root_gb']),
                                        'op': 'add'},
                                       {'path': '/instance_info/swap_mb',
                                        'value': str(self.flavor['swap']),
                                        'op': 'add'}]
        self._expected_cleanup_patch = []

    def test_create_generic(self):
        # Unrecognized drivers get the generic field patcher.
        node = ironic_utils.get_test_node(driver='fake')
        patcher_obj = patcher.create(node)
        self.assertIsInstance(patcher_obj, patcher.GenericDriverFields)

    def test_create_pxe(self):
        # pxe_* drivers get the PXE-specific field patcher.
        node = ironic_utils.get_test_node(driver='pxe_fake')
        patcher_obj = patcher.create(node)
        self.assertIsInstance(patcher_obj, patcher.PXEDriverFields)

    def test_generic_get_deploy_patch(self):
        node = ironic_utils.get_test_node(driver='fake')
        patch = patcher.create(node).get_deploy_patch(
            self.instance, self.image_meta, self.flavor)
        self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))

    def test_generic_get_deploy_patch_ephemeral(self):
        # An ephemeral disk adds size and format entries to the patch.
        CONF.set_override('default_ephemeral_format', 'testfmt')
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   ephemeral_gb=10)
        patch = patcher.create(node).get_deploy_patch(
            instance, self.image_meta, self.flavor)
        expected = [{'path': '/instance_info/ephemeral_gb',
                     'value': str(instance.ephemeral_gb),
                     'op': 'add'},
                    {'path': '/instance_info/ephemeral_format',
                     'value': 'testfmt',
                     'op': 'add'}]
        expected += self._expected_deploy_patch
        self.assertEqual(sorted(expected), sorted(patch))

    def test_generic_get_deploy_patch_preserve_ephemeral(self):
        # preserve_ephemeral is recorded (as a string) either way.
        node = ironic_utils.get_test_node(driver='fake')
        for preserve in [True, False]:
            patch = patcher.create(node).get_deploy_patch(
                self.instance, self.image_meta, self.flavor,
                preserve_ephemeral=preserve)
            expected = [{'path': '/instance_info/preserve_ephemeral',
                         'value': str(preserve), 'op': 'add', }]
            expected += self._expected_deploy_patch
            self.assertEqual(sorted(expected), sorted(patch))

    def test_generic_get_cleanup_patch(self):
        # Generic drivers have nothing to clean up.
        node = ironic_utils.get_test_node(driver='fake')
        patch = patcher.create(node).get_cleanup_patch(self.instance, None,
                                                       self.flavor)
        self.assertEqual(self._expected_cleanup_patch, patch)

    def test_pxe_get_deploy_patch(self):
        # PXE drivers also publish the deploy kernel/ramdisk from the
        # flavor's baremetal: extra specs.
        node = ironic_utils.get_test_node(driver='pxe_fake')
        extra_specs = self.flavor['extra_specs']
        expected = [{'path': '/driver_info/pxe_deploy_kernel',
                     'value': extra_specs['baremetal:deploy_kernel_id'],
                     'op': 'add'},
                    {'path': '/driver_info/pxe_deploy_ramdisk',
                     'value': extra_specs['baremetal:deploy_ramdisk_id'],
                     'op': 'add'}]
        expected += self._expected_deploy_patch
        patch = patcher.create(node).get_deploy_patch(
            self.instance, self.image_meta, self.flavor)
        self.assertEqual(sorted(expected), sorted(patch))

    def test_pxe_get_deploy_patch_no_flavor_kernel_ramdisk_ids(self):
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        node = ironic_utils.get_test_node(driver='pxe_fake')
        patch = patcher.create(node).get_deploy_patch(
            self.instance, self.image_meta, flavor)
        # If there's no extra_specs patch should be exactly like a
        # generic patch
        self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))

    def test_pxe_get_cleanup_patch(self):
        # Cleanup removes the kernel/ramdisk entries set at deploy time.
        driver_info = {'pxe_deploy_kernel': 'fake-kernel-id',
                       'pxe_deploy_ramdisk': 'fake-ramdisk-id'}
        node = ironic_utils.get_test_node(driver='pxe_fake',
                                          driver_info=driver_info)
        patch = patcher.create(node).get_cleanup_patch(self.instance, None,
                                                       self.flavor)
        expected = [{'path': '/driver_info/pxe_deploy_kernel',
                     'op': 'remove'},
                    {'path': '/driver_info/pxe_deploy_ramdisk',
                     'op': 'remove'}]
        self.assertEqual(sorted(expected), sorted(patch))

    def test_pxe_get_cleanup_patch_no_flavor_kernel_ramdisk_ids(self):
        self.flavor = ironic_utils.get_test_flavor(extra_specs={})
        node = ironic_utils.get_test_node(driver='pxe_fake')
        patch = patcher.create(node).get_cleanup_patch(self.instance, None,
                                                       self.flavor)
        # If there's no extra_specs patch should be exactly like a
        # generic patch
        self.assertEqual(self._expected_cleanup_patch, patch)
diff --git a/nova/tests/unit/virt/ironic/utils.py b/nova/tests/unit/virt/ironic/utils.py
new file mode 100644
index 0000000000..cee0abffac
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/utils.py
@@ -0,0 +1,115 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.ironic import ironic_states
+
+
def get_test_validation(**kw):
    """Return a fake node-validation result; each interface defaults True."""
    attrs = {iface: kw.get(iface, True)
             for iface in ('power', 'deploy', 'console', 'rescue')}
    return type('interfaces', (object,), attrs)()
+
+
def get_test_node(**kw):
    """Build a fake Ironic node object with sensible test defaults.

    Any field may be overridden via keyword arguments; everything else
    falls back to a benign default (NOSTATE power/provision states, the
    'fake' driver, empty dicts, ...).
    """
    return type('node', (object,),
                {'uuid': kw.get('uuid',
                                'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa'),
                 'chassis_uuid': kw.get('chassis_uuid'),
                 'power_state': kw.get('power_state',
                                       ironic_states.NOSTATE),
                 'target_power_state': kw.get('target_power_state',
                                              ironic_states.NOSTATE),
                 'provision_state': kw.get('provision_state',
                                           ironic_states.NOSTATE),
                 'target_provision_state': kw.get('target_provision_state',
                                                  ironic_states.NOSTATE),
                 'last_error': kw.get('last_error'),
                 'instance_uuid': kw.get('instance_uuid'),
                 'driver': kw.get('driver', 'fake'),
                 'driver_info': kw.get('driver_info', {}),
                 'properties': kw.get('properties', {}),
                 'reservation': kw.get('reservation'),
                 'maintenance': kw.get('maintenance', False),
                 'extra': kw.get('extra', {}),
                 # Fixed: these two lookups were crossed — 'updated_at'
                 # read kw['created_at'] and vice versa.
                 'updated_at': kw.get('updated_at'),
                 'created_at': kw.get('created_at')})()
+
+
def get_test_port(**kw):
    """Build a fake Ironic port object; fields overridable via kwargs."""
    attrs = {
        'uuid': kw.get('uuid', 'gggggggg-uuuu-qqqq-ffff-llllllllllll'),
        'node_uuid': kw.get('node_uuid', get_test_node().uuid),
        'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'),
        'extra': kw.get('extra', {}),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at'),
    }
    return type('port', (object,), attrs)()
+
+
def get_test_flavor(**kw):
    """Return a flavor dict for tests; any field overridable via kwargs."""
    extra_specs = kw.get(
        'extra_specs',
        {'baremetal:deploy_kernel_id':
         'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
         'baremetal:deploy_ramdisk_id':
         'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'})
    flavor = {
        'name': kw.get('name', 'fake.flavor'),
        'extra_specs': extra_specs,
        'swap': kw.get('swap', 0),
        'ephemeral_gb': kw.get('ephemeral_gb', 0),
    }
    return flavor
+
+
def get_test_image_meta(**kw):
    """Return minimal image metadata (just an image id) for tests."""
    image_id = kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')
    return {'id': image_id}
+
+
class FakePortClient(object):
    """No-op stand-in for python-ironicclient's port manager."""

    def get(self, port_uuid):
        pass

    def update(self, port_uuid, patch):
        pass
+
+
class FakeNodeClient(object):
    """No-op stand-in for python-ironicclient's node manager."""

    def list(self, detail=False):
        return []

    def get(self, node_uuid):
        pass

    def get_by_instance_uuid(self, instance_uuid):
        pass

    def list_ports(self, node_uuid, detail=False):
        # 'detail' added to match how callers invoke the real client
        # (the driver tests call list_ports(node_uuid, detail=True)).
        pass

    def set_power_state(self, node_uuid, target):
        pass

    def set_provision_state(self, node_uuid, target):
        pass

    def update(self, node_uuid, patch):
        pass

    def validate(self, node_uuid):
        pass
+
+
class FakeClient(object):
    """Minimal ironicclient look-alike exposing node and port managers."""

    node = FakeNodeClient()
    port = FakePortClient()
diff --git a/nova/tests/unit/virt/libvirt/__init__.py b/nova/tests/unit/virt/libvirt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/fake_imagebackend.py b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
new file mode 100644
index 0000000000..9a7cbdbdaf
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
@@ -0,0 +1,75 @@
+# Copyright 2012 Grid Dynamics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova.virt.libvirt import config
+from nova.virt.libvirt import imagebackend
+
+
class Backend(object):
    """Fake of nova.virt.libvirt.imagebackend.Backend for tests."""

    def __init__(self, use_cow):
        pass

    def image(self, instance, name, image_type=''):
        # Return a minimal Image whose path is derived from the instance
        # name; its __init__ bypasses imagebackend.Image.__init__ entirely.
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name):
                self.path = os.path.join(instance['name'], name)

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def cache(self, fetch_func, filename, size=None, *args, **kwargs):
                pass

            def snapshot(self, name):
                pass

            def libvirt_info(self, disk_bus, disk_dev, device_type,
                             cache_mode, extra_specs, hypervisor_version):
                # Build a file-backed raw-format disk config for self.path.
                info = config.LibvirtConfigGuestDisk()
                info.source_type = 'file'
                info.source_device = device_type
                info.target_bus = disk_bus
                info.target_dev = disk_dev
                info.driver_cache = cache_mode
                info.driver_format = 'raw'
                info.source_path = self.path
                return info

        return FakeImage(instance, name)

    def snapshot(self, instance, disk_path, image_type=''):
        # NOTE(bfilippov): this is done in favor for
        # snapshot tests in test_libvirt.LibvirtConnTestCase
        return imagebackend.Backend(True).snapshot(instance,
                                                   disk_path,
                                                   image_type=image_type)
+
+
class Raw(imagebackend.Image):
    """No-op Raw image backend fake."""
    # NOTE(spandhe) Added for test_rescue and test_rescue_config_drive
    def __init__(self, instance=None, disk_name=None, path=None):
        pass

    def _get_driver_format(self):
        pass

    def correct_format(self):
        pass

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        pass
diff --git a/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
new file mode 100644
index 0000000000..01ab689b00
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import StringIO
+
+from nova.virt.libvirt import utils as libvirt_utils
+
+
# Registry of fake file contents served by File/file_open; the console
# log is pre-seeded because callers expect it to exist.
files = {'console.log': True}
# Optional per-path overrides consulted by the getter functions below.
disk_sizes = {}
disk_backing_files = {}
# Disk format reported by get_disk_type() and used by find_disk().
disk_type = "qcow2"
+
+
def get_iscsi_initiator():
    """Return a canned iSCSI initiator IQN for the tests."""
    return "fake.initiator.iqn"
+
+
def get_fc_hbas():
    """Return a single canned Fibre Channel HBA record, sysfs-style."""
    hba = {'ClassDevice': 'host1',
           'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
                              '/0000:05:00.2/host1/fc_host/host1',
           'dev_loss_tmo': '30',
           'fabric_name': '0x1000000533f55566',
           'issue_lip': '<store method only>',
           'max_npiv_vports': '255',
           'maxframe_size': '2048 bytes',
           'node_name': '0x200010604b019419',
           'npiv_vports_inuse': '0',
           'port_id': '0x680409',
           'port_name': '0x100010604b019419',
           'port_state': 'Online',
           'port_type': 'NPort (fabric via point-to-point)',
           'speed': '10 Gbit',
           'supported_classes': 'Class 3',
           'supported_speeds': '10 Gbit',
           'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
           'tgtid_bind_type': 'wwpn (World Wide Port Name)',
           'uevent': None,
           'vport_create': '<store method only>',
           'vport_delete': '<store method only>'}
    return [hba]


def get_fc_hbas_info():
    """Summarize the canned HBA into the info dict shape nova expects."""
    hba = get_fc_hbas()[0]
    return [{'port_name': hba['port_name'].replace('0x', ''),
             'node_name': hba['node_name'].replace('0x', ''),
             'host_device': hba['ClassDevice'],
             'device_path': hba['ClassDevicePath']}]


def get_fc_wwpns():
    """Return the canned WWPNs with their 0x prefixes stripped."""
    return [hba['port_name'].replace('0x', '') for hba in get_fc_hbas()]


def get_fc_wwnns():
    """Return the canned WWNNs with their 0x prefixes stripped."""
    return [hba['node_name'].replace('0x', '') for hba in get_fc_hbas()]
+
+
def create_image(disk_format, path, size):
    """No-op stand-in for qemu-img image creation."""
    pass


def create_cow_image(backing_file, path):
    """No-op stand-in for qcow2 overlay creation."""
    pass


def get_disk_size(path):
    """Report every disk as zero bytes."""
    return 0


def get_disk_backing_file(path):
    """Return the backing file registered for ``path`` in the
    module-level ``disk_backing_files`` dict, or None.
    """
    return disk_backing_files.get(path, None)


def get_disk_type(path):
    """Return the module-wide fake disk format (default 'qcow2')."""
    return disk_type


def copy_image(src, dest):
    """No-op stand-in for image copying."""
    pass


def resize2fs(path):
    """No-op stand-in for filesystem resize."""
    pass


def create_lvm_image(vg, lv, size, sparse=False):
    """No-op stand-in for LVM volume creation."""
    pass


def volume_group_free_space(vg):
    """No-op; implicitly returns None rather than a byte count."""
    pass


def remove_logical_volumes(*paths):
    """No-op stand-in for LV removal."""
    pass


def write_to_file(path, contents, umask=None):
    """No-op stand-in; nothing is ever written."""
    pass


def chown(path, owner):
    """No-op stand-in for ownership changes."""
    pass
+
+
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Register ``out_path`` as an (empty) extracted snapshot in the
    module-level ``files`` dict so file_open() can later find it.
    """
    files[out_path] = ''


class File(object):
    """In-memory file object backed by the module-level ``files`` dict.

    Falls back to looking up just the basename when the full path is
    not registered.  Usable directly or as a context manager.
    """

    def __init__(self, path, mode=None):
        # NOTE(review): StringIO is the Python 2 module; a Python 3 port
        # needs io.StringIO.  ``mode`` is accepted but ignored.
        if path in files:
            self.fp = StringIO.StringIO(files[path])
        else:
            # Unknown full path: fall back to the basename entry; a
            # completely unknown name raises KeyError, matching a
            # missing file.
            self.fp = StringIO.StringIO(files[os.path.split(path)[-1]])

    def __enter__(self):
        return self.fp

    def __exit__(self, *args):
        return

    def close(self, *args, **kwargs):
        self.fp.close()


def file_open(path, mode=None):
    """Open a fake in-memory file; ``mode`` is ignored."""
    return File(path, mode)
+
+
def find_disk(virt_dom):
    """Return a canned disk path keyed off the module-wide disk_type.

    ``virt_dom`` is accepted for signature compatibility and ignored.
    """
    if disk_type == 'lvm':
        return "/dev/nova-vg/lv"
    elif disk_type in ['raw', 'qcow2']:
        return "filename"
    else:
        return "unknown_type_disk"
+
+
def load_file(path):
    """Return the contents of ``path``, or '' when it does not exist."""
    if not os.path.exists(path):
        return ''
    with open(path, 'r') as handle:
        return handle.read()
+
+
def logical_volume_info(path):
    """Pretend no LV metadata is available for any path."""
    return {}


def file_delete(path):
    """Pretend the deletion always succeeded."""
    return True


def get_fs_info(path):
    """Return fixed filesystem usage figures (128 GiB total)."""
    gib = 1024 ** 3
    return {'total': 128 * gib,
            'used': 44 * gib,
            'free': 84 * gib}


def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
    """No-op stand-in for an image download."""
    pass
+
+
def get_instance_path(instance, forceold=False, relative=False):
    """Delegate to the real libvirt_utils implementation (pure path
    computation, safe to run in tests).
    """
    return libvirt_utils.get_instance_path(instance, forceold=forceold,
                                           relative=relative)
+
+
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Always report the qemu disk driver, whatever the hypervisor."""
    return "qemu"


def is_valid_hostname(name):
    """Treat every hostname as valid in tests."""
    return True


def chown_for_id_maps(path, id_maps):
    """No-op stand-in for nested-container ownership fixups."""
    pass
+
+
def get_arch(image_meta):
    """Delegate architecture detection to the real libvirt_utils."""
    return libvirt_utils.get_arch(image_meta)
diff --git a/nova/tests/unit/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
new file mode 100644
index 0000000000..3a0e7ebefb
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
@@ -0,0 +1,1108 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+import time
+import uuid
+
+from nova.compute import arch
+from nova.i18n import _
+
+# Allow passing None to the various connect methods
+# (i.e. allow the client to rely on default URLs)
+allow_default_uri_connection = True
+
+# string indicating the CPU arch
+node_arch = arch.X86_64 # or 'i686' (or whatever else uname -m might return)
+
+# memory size in kilobytes
+node_kB_mem = 4096
+
+# the number of active CPUs
+node_cpus = 2
+
+# expected CPU frequency
+node_mhz = 800
+
+# the number of NUMA cell, 1 for unusual NUMA topologies or uniform
+# memory access; check capabilities XML for the actual NUMA topology
+node_nodes = 1 # NUMA nodes
+
+# number of CPU sockets per node if nodes > 1, total number of CPU
+# sockets otherwise
+node_sockets = 1
+
+# number of cores per socket
+node_cores = 2
+
+# number of threads per core
+node_threads = 1
+
+# CPU model
+node_cpu_model = "Penryn"
+
+# CPU vendor
+node_cpu_vendor = "Intel"
+
+# Has libvirt connection been used at least once
+connection_used = False
+
+
def _reset():
    """Restore module-level connection policy to its default.

    Only ``allow_default_uri_connection`` is reset; other module
    globals (e.g. ``connection_used``) are intentionally left alone.
    """
    global allow_default_uri_connection
    allow_default_uri_connection = True
+
+# virDomainState
+VIR_DOMAIN_NOSTATE = 0
+VIR_DOMAIN_RUNNING = 1
+VIR_DOMAIN_BLOCKED = 2
+VIR_DOMAIN_PAUSED = 3
+VIR_DOMAIN_SHUTDOWN = 4
+VIR_DOMAIN_SHUTOFF = 5
+VIR_DOMAIN_CRASHED = 6
+
+VIR_DOMAIN_XML_SECURE = 1
+VIR_DOMAIN_XML_INACTIVE = 2
+
+VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
+VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
+VIR_DOMAIN_BLOCK_REBASE_COPY = 8
+
+VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2
+
+VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
+
+VIR_DOMAIN_EVENT_DEFINED = 0
+VIR_DOMAIN_EVENT_UNDEFINED = 1
+VIR_DOMAIN_EVENT_STARTED = 2
+VIR_DOMAIN_EVENT_SUSPENDED = 3
+VIR_DOMAIN_EVENT_RESUMED = 4
+VIR_DOMAIN_EVENT_STOPPED = 5
+VIR_DOMAIN_EVENT_SHUTDOWN = 6
+VIR_DOMAIN_EVENT_PMSUSPENDED = 7
+
+VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1
+
+VIR_DOMAIN_AFFECT_CURRENT = 0
+VIR_DOMAIN_AFFECT_LIVE = 1
+VIR_DOMAIN_AFFECT_CONFIG = 2
+
+VIR_CPU_COMPARE_ERROR = -1
+VIR_CPU_COMPARE_INCOMPATIBLE = 0
+VIR_CPU_COMPARE_IDENTICAL = 1
+VIR_CPU_COMPARE_SUPERSET = 2
+
+VIR_CRED_USERNAME = 1
+VIR_CRED_AUTHNAME = 2
+VIR_CRED_LANGUAGE = 3
+VIR_CRED_CNONCE = 4
+VIR_CRED_PASSPHRASE = 5
+VIR_CRED_ECHOPROMPT = 6
+VIR_CRED_NOECHOPROMPT = 7
+VIR_CRED_REALM = 8
+VIR_CRED_EXTERNAL = 9
+
+VIR_MIGRATE_LIVE = 1
+VIR_MIGRATE_PEER2PEER = 2
+VIR_MIGRATE_TUNNELLED = 4
+VIR_MIGRATE_UNDEFINE_SOURCE = 16
+VIR_MIGRATE_NON_SHARED_INC = 128
+
+VIR_NODE_CPU_STATS_ALL_CPUS = -1
+
+VIR_DOMAIN_START_PAUSED = 1
+
+# libvirtError enums
+# (Intentionally different from what's in libvirt. We do this to check,
+# that consumers of the library are using the symbolic names rather than
+# hardcoding the numerical values)
+VIR_FROM_QEMU = 100
+VIR_FROM_DOMAIN = 200
+VIR_FROM_NWFILTER = 330
+VIR_FROM_REMOTE = 340
+VIR_FROM_RPC = 345
+VIR_ERR_NO_SUPPORT = 3
+VIR_ERR_XML_DETAIL = 350
+VIR_ERR_NO_DOMAIN = 420
+VIR_ERR_OPERATION_INVALID = 55
+VIR_ERR_OPERATION_TIMEOUT = 68
+VIR_ERR_NO_NWFILTER = 620
+VIR_ERR_SYSTEM_ERROR = 900
+VIR_ERR_INTERNAL_ERROR = 950
+VIR_ERR_CONFIG_UNSUPPORTED = 951
+
+# Readonly
+VIR_CONNECT_RO = 1
+
+# virConnectBaselineCPU flags
+VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1
+
+# snapshotCreateXML flags
+VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
+VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
+VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
+VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
+
+# blockCommit flags
+VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
+
+
+VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
+VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
+
+
+def _parse_disk_info(element):
+ disk_info = {}
+ disk_info['type'] = element.get('type', 'file')
+ disk_info['device'] = element.get('device', 'disk')
+
+ driver = element.find('./driver')
+ if driver is not None:
+ disk_info['driver_name'] = driver.get('name')
+ disk_info['driver_type'] = driver.get('type')
+
+ source = element.find('./source')
+ if source is not None:
+ disk_info['source'] = source.get('file')
+ if not disk_info['source']:
+ disk_info['source'] = source.get('dev')
+
+ if not disk_info['source']:
+ disk_info['source'] = source.get('path')
+
+ target = element.find('./target')
+ if target is not None:
+ disk_info['target_dev'] = target.get('dev')
+ disk_info['target_bus'] = target.get('bus')
+
+ return disk_info
+
+
class libvirtError(Exception):
    """Drop-in stand-in for libvirt-python's libvirtError.

    Tests may run against the real binding when it is installed, so the
    constructor arguments and instance attributes must stay strictly
    compatible with `libvirt-python:libvirt-override.py`.

    ``err`` is either None or the 9-tuple::

        (error_code, error_domain, error_message, error_level,
         str1, str2, str3, int1, int2)

    Use the `make_libvirtError` helper to build an instance with the
    tuple filled in one call.
    """

    def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
                 vol=None):
        Exception.__init__(self, defmsg)
        self.err = None

    def _err_field(self, index):
        # All getters share this: None until ``err`` has been assigned,
        # otherwise the requested slot of the 9-tuple.
        if self.err is None:
            return None
        return self.err[index]

    def get_error_code(self):
        return self._err_field(0)

    def get_error_domain(self):
        return self._err_field(1)

    def get_error_message(self):
        return self._err_field(2)

    def get_error_level(self):
        return self._err_field(3)

    def get_str1(self):
        return self._err_field(4)

    def get_str2(self):
        return self._err_field(5)

    def get_str3(self):
        return self._err_field(6)

    def get_int1(self):
        return self._err_field(7)

    def get_int2(self):
        return self._err_field(8)
+
+
class NWFilter(object):
    """Fake virNWFilter: remembers its XML and registers itself by the
    name attribute of the root element.
    """

    def __init__(self, connection, xml):
        self._connection = connection
        self._xml = xml
        self._parse_xml(xml)

    def _parse_xml(self, xml):
        # Only the filter name is ever needed by the fake connection.
        root = etree.fromstring(xml).find('.')
        self._name = root.get('name')

    def undefine(self):
        """Unregister this filter from the owning connection."""
        self._connection._remove_filter(self)
+
+
class Domain(object):
    """In-memory fake of libvirt's virDomain.

    Covers the subset of the domain API the nova libvirt driver tests
    touch: lifecycle transitions, XML definition parsing and
    round-tripping, device attach/detach, snapshots and canned stats.
    All state lives on this object and on the owning Connection.
    """

    def __init__(self, connection, xml, running=False, transient=False):
        self._connection = connection
        if running:
            connection._mark_running(self)

        self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF
        self._transient = transient
        self._def = self._parse_definition(xml)
        self._has_saved_state = False
        self._snapshots = {}
        # NOTE(review): _mark_running() increments _id_counter before this
        # assignment, so for a running domain _id is one past the key used
        # in Connection._running_vms — confirm this is intentional.
        self._id = self._connection._id_counter

    def _parse_definition(self, xml):
        """Parse domain XML into a plain-dict definition.

        Raises a fake libvirtError (VIR_ERR_XML_DETAIL) on unparsable
        XML.  Only the elements the tests care about are extracted.
        """
        try:
            tree = etree.fromstring(xml)
        except etree.ParseError:
            raise make_libvirtError(
                libvirtError, "Invalid XML.",
                error_code=VIR_ERR_XML_DETAIL,
                error_domain=VIR_FROM_DOMAIN)

        definition = {}

        name = tree.find('./name')
        if name is not None:
            definition['name'] = name.text

        uuid_elem = tree.find('./uuid')
        if uuid_elem is not None:
            definition['uuid'] = uuid_elem.text
        else:
            # Fabricate a uuid when the XML does not carry one.
            definition['uuid'] = str(uuid.uuid4())

        vcpu = tree.find('./vcpu')
        if vcpu is not None:
            definition['vcpu'] = int(vcpu.text)

        memory = tree.find('./memory')
        if memory is not None:
            definition['memory'] = int(memory.text)

        os = {}
        os_type = tree.find('./os/type')
        if os_type is not None:
            os['type'] = os_type.text
            # Default to the fake node's arch when none is specified.
            os['arch'] = os_type.get('arch', node_arch)

        os_kernel = tree.find('./os/kernel')
        if os_kernel is not None:
            os['kernel'] = os_kernel.text

        os_initrd = tree.find('./os/initrd')
        if os_initrd is not None:
            os['initrd'] = os_initrd.text

        os_cmdline = tree.find('./os/cmdline')
        if os_cmdline is not None:
            os['cmdline'] = os_cmdline.text

        os_boot = tree.find('./os/boot')
        if os_boot is not None:
            os['boot_dev'] = os_boot.get('dev')

        definition['os'] = os

        features = {}

        acpi = tree.find('./features/acpi')
        if acpi is not None:
            features['acpi'] = True

        definition['features'] = features

        devices = {}

        device_nodes = tree.find('./devices')
        if device_nodes is not None:
            disks_info = []
            disks = device_nodes.findall('./disk')
            for disk in disks:
                disks_info += [_parse_disk_info(disk)]
            devices['disks'] = disks_info

            nics_info = []
            nics = device_nodes.findall('./interface')
            for nic in nics:
                nic_info = {}
                nic_info['type'] = nic.get('type')

                mac = nic.find('./mac')
                if mac is not None:
                    nic_info['mac'] = mac.get('address')

                source = nic.find('./source')
                if source is not None:
                    # The source attribute name depends on the nic type.
                    if nic_info['type'] == 'network':
                        nic_info['source'] = source.get('network')
                    elif nic_info['type'] == 'bridge':
                        nic_info['source'] = source.get('bridge')

                nics_info += [nic_info]

            devices['nics'] = nics_info

        definition['devices'] = devices

        return definition

    def create(self):
        self.createWithFlags(0)

    def createWithFlags(self, flags):
        # FIXME: Not handling flags at the moment
        self._state = VIR_DOMAIN_RUNNING
        self._connection._mark_running(self)
        # Starting a domain consumes any managed save image.
        self._has_saved_state = False

    def isActive(self):
        # libvirt returns an int, not a bool.
        return int(self._state == VIR_DOMAIN_RUNNING)

    def undefine(self):
        self._connection._undefine(self)

    def undefineFlags(self, flags):
        self.undefine()
        if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
            if self.hasManagedSaveImage(0):
                self.managedSaveRemove()

    def destroy(self):
        self._state = VIR_DOMAIN_SHUTOFF
        self._connection._mark_not_running(self)

    def ID(self):
        return self._id

    def name(self):
        return self._def['name']

    def UUIDString(self):
        return self._def['uuid']

    def interfaceStats(self, device):
        # Canned (rx_bytes, rx_packets, rx_errs, rx_drop,
        #         tx_bytes, tx_packets, tx_errs, tx_drop).
        return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]

    def blockStats(self, device):
        # Canned (rd_req, rd_bytes, wr_req, wr_bytes, errs).
        return [2, 10000242400, 234, 2343424234, 34]

    def suspend(self):
        self._state = VIR_DOMAIN_PAUSED

    def shutdown(self):
        self._state = VIR_DOMAIN_SHUTDOWN
        self._connection._mark_not_running(self)

    def reset(self, flags):
        # FIXME: Not handling flags at the moment
        self._state = VIR_DOMAIN_RUNNING
        self._connection._mark_running(self)

    def info(self):
        # (state, maxMem, memory, nrVirtCpu, cpuTime), matching
        # virDomainGetInfo.
        # NOTE(review): Python 2-only long()/L literals; a py3 port must
        # drop them.
        return [self._state,
                long(self._def['memory']),
                long(self._def['memory']),
                self._def['vcpu'],
                123456789L]

    def migrateToURI(self, desturi, flags, dname, bandwidth):
        raise make_libvirtError(
            libvirtError,
            "Migration always fails for fake libvirt!",
            error_code=VIR_ERR_INTERNAL_ERROR,
            error_domain=VIR_FROM_QEMU)

    def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
        raise make_libvirtError(
            libvirtError,
            "Migration always fails for fake libvirt!",
            error_code=VIR_ERR_INTERNAL_ERROR,
            error_domain=VIR_FROM_QEMU)

    def attachDevice(self, xml):
        disk_info = _parse_disk_info(etree.fromstring(xml))
        disk_info['_attached'] = True
        self._def['devices']['disks'] += [disk_info]
        return True

    def attachDeviceFlags(self, xml, flags):
        if (flags & VIR_DOMAIN_AFFECT_LIVE and
                self._state != VIR_DOMAIN_RUNNING):
            raise make_libvirtError(
                libvirtError,
                "AFFECT_LIVE only allowed for running domains!",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)
        self.attachDevice(xml)

    def detachDevice(self, xml):
        # NOTE(review): only reports whether a matching attached disk
        # exists; it does not actually remove it from the definition —
        # confirm callers don't rely on removal.
        disk_info = _parse_disk_info(etree.fromstring(xml))
        disk_info['_attached'] = True
        return disk_info in self._def['devices']['disks']

    def detachDeviceFlags(self, xml, _flags):
        self.detachDevice(xml)

    def XMLDesc(self, flags):
        """Serialize the parsed definition back into domain XML."""
        disks = ''
        for disk in self._def['devices']['disks']:
            disks += '''<disk type='%(type)s' device='%(device)s'>
      <driver name='%(driver_name)s' type='%(driver_type)s'/>
      <source file='%(source)s'/>
      <target dev='%(target_dev)s' bus='%(target_bus)s'/>
      <address type='drive' controller='0' bus='0' unit='0'/>
    </disk>''' % disk

        nics = ''
        for nic in self._def['devices']['nics']:
            nics += '''<interface type='%(type)s'>
      <mac address='%(mac)s'/>
      <source %(type)s='%(source)s'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
               function='0x0'/>
    </interface>''' % nic

        return '''<domain type='kvm'>
  <name>%(name)s</name>
  <uuid>%(uuid)s</uuid>
  <memory>%(memory)s</memory>
  <currentMemory>%(memory)s</currentMemory>
  <vcpu>%(vcpu)s</vcpu>
  <os>
    <type arch='%(arch)s' machine='pc-0.12'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    %(disks)s
    <controller type='ide' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
               function='0x1'/>
    </controller>
    %(nics)s
    <serial type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </serial>
    <serial type='pty'>
      <source pty='/dev/pts/27'/>
      <target port='1'/>
    </serial>
    <serial type='tcp'>
      <source host="-1" service="-1" mode="bind"/>
    </serial>
    <console type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </console>
    <input type='tablet' bus='usb'/>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <graphics type='spice' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02'
               function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
               function='0x0'/>
    </memballoon>
  </devices>
</domain>''' % {'name': self._def['name'],
                'uuid': self._def['uuid'],
                'memory': self._def['memory'],
                'vcpu': self._def['vcpu'],
                'arch': self._def['os']['arch'],
                'disks': disks,
                'nics': nics}

    def managedSave(self, flags):
        self._connection._mark_not_running(self)
        self._has_saved_state = True

    def managedSaveRemove(self, flags):
        self._has_saved_state = False

    def hasManagedSaveImage(self, flags):
        return int(self._has_saved_state)

    def resume(self):
        self._state = VIR_DOMAIN_RUNNING

    def snapshotCreateXML(self, xml, flags):
        tree = etree.fromstring(xml)
        name = tree.find('./name').text
        snapshot = DomainSnapshot(name, self)
        self._snapshots[name] = snapshot
        return snapshot

    def vcpus(self):
        # (per-vcpu info tuples, per-vcpu pinning tuples) matching
        # virDomainGetVcpus.
        # NOTE(review): Python 2-only L literal below.
        vcpus = ([], [])
        for i in range(0, self._def['vcpu']):
            vcpus[0].append((i, 1, 120405L, i))
            vcpus[1].append((True, True, True, True))
        return vcpus

    def memoryStats(self):
        return {}

    def maxMemory(self):
        return self._def['memory']

    def blockJobInfo(self, disk, flags):
        return {}
+
+
class DomainSnapshot(object):
    """Fake virDomainSnapshot; the registry lives on the owning Domain."""

    def __init__(self, name, domain):
        self._name = name
        self._domain = domain

    def delete(self, flags):
        """Drop this snapshot from the parent domain's registry."""
        self._domain._snapshots.pop(self._name)
+
+
class Connection(object):
    """In-memory fake of libvirt's virConnect.

    Tracks defined and running Domain objects, nwfilters and lifecycle
    event callbacks entirely in memory.
    """

    def __init__(self, uri=None, readonly=False, version=9011):
        if not uri or uri == '':
            if allow_default_uri_connection:
                uri = 'qemu:///session'
            else:
                raise ValueError("URI was None, but fake libvirt is "
                                 "configured to not accept this.")

        uri_whitelist = ['qemu:///system',
                         'qemu:///session',
                         'xen:///system',
                         'uml:///system',
                         'test:///default']

        if uri not in uri_whitelist:
            raise make_libvirtError(
                libvirtError,
                "libvirt error: no connection driver "
                "available for No connection for URI %s" % uri,
                error_code=5, error_domain=0)

        self.readonly = readonly
        self._uri = uri
        # name -> Domain for every defined domain.
        self._vms = {}
        # id -> Domain for running domains only.
        self._running_vms = {}
        self._id_counter = 1  # libvirt reserves 0 for the hypervisor.
        self._nwfilters = {}
        self._event_callbacks = {}
        self.fakeLibVersion = version
        self.fakeVersion = version

    def _add_filter(self, nwfilter):
        self._nwfilters[nwfilter._name] = nwfilter

    def _remove_filter(self, nwfilter):
        del self._nwfilters[nwfilter._name]

    def _mark_running(self, dom):
        self._running_vms[self._id_counter] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
        self._id_counter += 1

    def _mark_not_running(self, dom):
        # Transient domains disappear entirely once stopped.
        if dom._transient:
            self._undefine(dom)

        dom._id = -1

        # NOTE(review): Python 2-only iteritems(); also deletes from the
        # dict while iterating it — safe only because of the immediate
        # return.
        for (k, v) in self._running_vms.iteritems():
            if v == dom:
                del self._running_vms[k]
                self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
                return

    def _undefine(self, dom):
        del self._vms[dom.name()]
        if not dom._transient:
            self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)

    def getInfo(self):
        # Matches virNodeGetInfo ordering.
        return [node_arch,
                node_kB_mem,
                node_cpus,
                node_mhz,
                node_nodes,
                node_sockets,
                node_cores,
                node_threads]

    def numOfDomains(self):
        return len(self._running_vms)

    def listDomainsID(self):
        return self._running_vms.keys()

    def lookupByID(self, id):
        if id in self._running_vms:
            return self._running_vms[id]
        raise make_libvirtError(
            libvirtError,
            'Domain not found: no domain with matching id %d' % id,
            error_code=VIR_ERR_NO_DOMAIN,
            error_domain=VIR_FROM_QEMU)

    def lookupByName(self, name):
        if name in self._vms:
            return self._vms[name]
        raise make_libvirtError(
            libvirtError,
            'Domain not found: no domain with matching name "%s"' % name,
            error_code=VIR_ERR_NO_DOMAIN,
            error_domain=VIR_FROM_QEMU)
+ def listAllDomains(self, flags):
+ vms = []
+ for vm in self._vms:
+ if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
+ if vm.state != VIR_DOMAIN_SHUTOFF:
+ vms.append(vm)
+ if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
+ if vm.state == VIR_DOMAIN_SHUTOFF:
+ vms.append(vm)
+ return vms
+
    def _emit_lifecycle(self, dom, event, detail):
        """Invoke the registered lifecycle callback, if any."""
        if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
            return

        cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
        callback = cbinfo[0]
        opaque = cbinfo[1]
        callback(self, dom, event, detail, opaque)

    def defineXML(self, xml):
        """Define (but do not start) a persistent domain from XML."""
        dom = Domain(connection=self, running=False, transient=False, xml=xml)
        self._vms[dom.name()] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
        return dom

    def createXML(self, xml, flags):
        """Create and start a transient domain from XML."""
        dom = Domain(connection=self, running=True, transient=True, xml=xml)
        self._vms[dom.name()] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
        return dom

    def getType(self):
        # NOTE(review): implicitly returns None for every URI other
        # than qemu:///system — confirm callers only use system URIs.
        if self._uri == 'qemu:///system':
            return 'QEMU'

    def getLibVersion(self):
        return self.fakeLibVersion

    def getVersion(self):
        return self.fakeVersion

    def getHostname(self):
        return 'compute1'

    def domainEventRegisterAny(self, dom, eventid, callback, opaque):
        self._event_callbacks[eventid] = [callback, opaque]

    def registerCloseCallback(self, cb, opaque):
        pass

    def getCapabilities(self):
        """Return spoofed capabilities."""
        return '''<capabilities>
  <host>
    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
    <cpu>
      <arch>x86_64</arch>
      <model>Penryn</model>
      <vendor>Intel</vendor>
      <topology sockets='1' cores='2' threads='1'/>
      <feature name='xtpr'/>
      <feature name='tm2'/>
      <feature name='est'/>
      <feature name='vmx'/>
      <feature name='ds_cpl'/>
      <feature name='monitor'/>
      <feature name='pbe'/>
      <feature name='tm'/>
      <feature name='ht'/>
      <feature name='ss'/>
      <feature name='acpi'/>
      <feature name='ds'/>
      <feature name='vme'/>
    </cpu>
    <migration_features>
      <live/>
      <uri_transports>
        <uri_transport>tcp</uri_transport>
      </uri_transports>
    </migration_features>
    <secmodel>
      <model>apparmor</model>
      <doi>0</doi>
    </secmodel>
  </host>

  <guest>
    <os_type>hvm</os_type>
    <arch name='i686'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu</emulator>
      <machine>pc-0.14</machine>
      <machine canonical='pc-0.14'>pc</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/kvm</emulator>
        <machine>pc-0.14</machine>
        <machine canonical='pc-0.14'>pc</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <pae/>
      <nonpae/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='x86_64'>
      <wordsize>64</wordsize>
      <emulator>/usr/bin/qemu-system-x86_64</emulator>
      <machine>pc-0.14</machine>
      <machine canonical='pc-0.14'>pc</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/kvm</emulator>
        <machine>pc-0.14</machine>
        <machine canonical='pc-0.14'>pc</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='armv7l'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-arm</emulator>
      <machine>integratorcp</machine>
      <machine>vexpress-a9</machine>
      <machine>syborg</machine>
      <machine>musicpal</machine>
      <machine>mainstone</machine>
      <machine>n800</machine>
      <machine>n810</machine>
      <machine>n900</machine>
      <machine>cheetah</machine>
      <machine>sx1</machine>
      <machine>sx1-v1</machine>
      <machine>beagle</machine>
      <machine>beaglexm</machine>
      <machine>tosa</machine>
      <machine>akita</machine>
      <machine>spitz</machine>
      <machine>borzoi</machine>
      <machine>terrier</machine>
      <machine>connex</machine>
      <machine>verdex</machine>
      <machine>lm3s811evb</machine>
      <machine>lm3s6965evb</machine>
      <machine>realview-eb</machine>
      <machine>realview-eb-mpcore</machine>
      <machine>realview-pb-a8</machine>
      <machine>realview-pbx-a9</machine>
      <machine>versatilepb</machine>
      <machine>versatileab</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='mips'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-mips</emulator>
      <machine>malta</machine>
      <machine>mipssim</machine>
      <machine>magnum</machine>
      <machine>pica61</machine>
      <machine>mips</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='mipsel'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-mipsel</emulator>
      <machine>malta</machine>
      <machine>mipssim</machine>
      <machine>magnum</machine>
      <machine>pica61</machine>
      <machine>mips</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='sparc'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-sparc</emulator>
      <machine>SS-5</machine>
      <machine>leon3_generic</machine>
      <machine>SS-10</machine>
      <machine>SS-600MP</machine>
      <machine>SS-20</machine>
      <machine>Voyager</machine>
      <machine>LX</machine>
      <machine>SS-4</machine>
      <machine>SPARCClassic</machine>
      <machine>SPARCbook</machine>
      <machine>SS-1000</machine>
      <machine>SS-2000</machine>
      <machine>SS-2</machine>
      <domain type='qemu'>
      </domain>
    </arch>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='ppc'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-ppc</emulator>
      <machine>g3beige</machine>
      <machine>virtex-ml507</machine>
      <machine>mpc8544ds</machine>
      <machine canonical='bamboo-0.13'>bamboo</machine>
      <machine>bamboo-0.13</machine>
      <machine>bamboo-0.12</machine>
      <machine>ref405ep</machine>
      <machine>taihu</machine>
      <machine>mac99</machine>
      <machine>prep</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>

</capabilities>'''

    def compareCPU(self, xml, flags):
        """Crude virConnectCompareCPU: only arch, model and vendor are
        checked against the fake node's values.
        """
        tree = etree.fromstring(xml)

        arch_node = tree.find('./arch')
        if arch_node is not None:
            if arch_node.text not in [arch.X86_64,
                                      arch.I686]:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        model_node = tree.find('./model')
        if model_node is not None:
            if model_node.text != node_cpu_model:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        vendor_node = tree.find('./vendor')
        if vendor_node is not None:
            if vendor_node.text != node_cpu_vendor:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        # The rest of the stuff libvirt implements is rather complicated
        # and I don't think it adds much value to replicate it here.

        return VIR_CPU_COMPARE_IDENTICAL

    def getCPUStats(self, cpuNum, flag):
        """Canned per-CPU stats; only CPUs 0 and 1 exist on the fake
        node (node_cpus == 2).
        """
        # NOTE(review): Python 2-only L literals below.
        if cpuNum < 2:
            return {'kernel': 5664160000000L,
                    'idle': 1592705190000000L,
                    'user': 26728850000000L,
                    'iowait': 6121490000000L}
        else:
            raise make_libvirtError(
                libvirtError,
                "invalid argument: Invalid cpu number",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)

    def nwfilterLookupByName(self, name):
        try:
            return self._nwfilters[name]
        except KeyError:
            raise make_libvirtError(
                libvirtError,
                "no nwfilter with matching name %s" % name,
                error_code=VIR_ERR_NO_NWFILTER,
                error_domain=VIR_FROM_NWFILTER)

    def nwfilterDefineXML(self, xml):
        # NOTE(review): the real API returns the filter object; this
        # fake returns None — confirm callers ignore the return value.
        nwfilter = NWFilter(self, xml)
        self._add_filter(nwfilter)

    def listDefinedDomains(self):
        return []

    def listDevices(self, cap, flags):
        return []

    def baselineCPU(self, cpu, flag):
        """Add new libvirt API."""
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Westmere</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='aes'/>
                  </cpu>"""
+
+
def openAuth(uri, auth, flags):
    """Fake virConnectOpenAuth.

    ``auth`` follows the libvirt convention: a three-element list of
    (credential-type list, callback, opaque).  Only the shape is
    validated; credentials are never actually requested.

    Raises Exception on a malformed ``auth``; otherwise returns a
    Connection that is read-only when ``flags == VIR_CONNECT_RO``.
    """
    if not isinstance(auth, list):
        raise Exception(_("Expected a list for 'auth' parameter"))

    # Fix: the original message claimed a *function* was expected here,
    # but auth[0] is the list of credential types (the check itself was
    # already for a list).
    if not isinstance(auth[0], list):
        raise Exception(
            _("Expected a list in 'auth[0]' parameter"))

    if not callable(auth[1]):
        raise Exception(
            _("Expected a function in 'auth[1]' parameter"))

    return Connection(uri, (flags == VIR_CONNECT_RO))
+
+
def virEventRunDefaultImpl():
    """Fake one event-loop iteration; just sleep so callers can spin."""
    time.sleep(1)


def virEventRegisterDefaultImpl():
    """Mirror libvirt's requirement that the default event
    implementation be registered before any connection is used.
    """
    if connection_used:
        raise Exception(_("virEventRegisterDefaultImpl() must be \
            called before connection is used."))


def registerErrorHandler(handler, ctxt):
    """No-op: the fake never emits global libvirt errors."""
    pass
+
+
def make_libvirtError(error_class, msg, error_code=None,
                      error_domain=None, error_message=None,
                      error_level=None, str1=None, str2=None, str3=None,
                      int1=None, int2=None):
    """Build a `libvirtError` with its ``err`` tuple filled in one call.

    ``error_class`` must be passed in because it may be
    `libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on
    whether `libvirt-python` is installed.
    """
    err_tuple = (error_code, error_domain, error_message, error_level,
                 str1, str2, str3, int1, int2)
    exc = error_class(msg)
    exc.err = err_tuple
    return exc
+
+
# Aliases matching the public names exported by the real libvirt
# binding, so client code can use either spelling.
virDomain = Domain


virConnect = Connection
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
new file mode 100644
index 0000000000..f849bc59a7
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -0,0 +1,991 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova import block_device
+from nova.compute import arch
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+import nova.tests.unit.image.fake
+from nova.virt import block_device as driver_block_device
+from nova.virt.libvirt import blockinfo
+
+
+class LibvirtBlockInfoTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtBlockInfoTest, self).setUp()
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.get_admin_context()
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.test_instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': 2, # m1.tiny
+ 'config_drive': None,
+ 'system_metadata': {
+ 'instance_type_memory_mb': 128,
+ 'instance_type_root_gb': 0,
+ 'instance_type_name': 'm1.micro',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_vcpus': 1,
+ 'instance_type_swap': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': '1',
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_id': 2,
+ }
+ }
+
+ def test_volume_in_mapping(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/sdc1', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/sdd', 'size': 10}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ self.assertEqual(
+ true_or_false,
+ block_device.volume_in_mapping(device_name,
+ block_device_info))
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
+
+ def test_find_disk_dev(self):
+ mapping = {
+ "disk.local": {
+ 'dev': 'sda',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ "disk.swap": {
+ 'dev': 'sdc',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ }
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
+ self.assertEqual('sdb', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
+ last_device=True)
+ self.assertEqual('sdz', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
+ self.assertEqual('vda', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc')
+ self.assertEqual('fda', dev)
+
+ def test_get_next_disk_dev(self):
+ mapping = {}
+ mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'},
+ mapping['disk.local'])
+
+ mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'},
+ mapping['disk.swap'])
+
+ mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
+ 'ide',
+ 'cdrom',
+ True)
+ self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'},
+ mapping['disk.config'])
+
+ def test_get_next_disk_dev_boot_index(self):
+ info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1)
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info)
+
+ info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2)
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio',
+ 'type': 'disk', 'boot_index': '2'},
+ info)
+
+ def test_get_disk_mapping_simple(self):
+ # The simplest possible disk mapping setup, all defaults
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_rootdev(self):
+ # A simple disk mapping setup, but with custom root device name
+
+ instance_ref = objects.Instance(**self.test_instance)
+ block_device_info = {
+ 'root_device_name': '/dev/sda'
+ }
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'scsi', 'dev': 'sda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'scsi', 'dev': 'sda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_rescue(self):
+ # A simple disk mapping setup, but in rescue mode
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ rescue=True)
+
+ expect = {
+ 'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_lxc(self):
+ # A simple disk mapping setup, but for lxc
+
+ self.test_instance['ephemeral_gb'] = 0
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
+ "lxc", "lxc",
+ None)
+ expect = {
+ 'disk': {'bus': 'lxc', 'dev': None,
+ 'type': 'disk', 'boot_index': '1'},
+ 'root': {'bus': 'lxc', 'dev': None,
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_iso(self):
+        # A simple disk mapping setup, but with an ISO as the root device
+
+ instance_ref = objects.Instance(**self.test_instance)
+ image_meta = {'disk_format': 'iso'}
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ None,
+ image_meta)
+
+ expect = {
+ 'disk': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_swap(self):
+ # A simple disk mapping setup, but with a swap device added
+
+ self.test_instance['system_metadata']['instance_type_swap'] = 5
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_configdrive(self):
+ # A simple disk mapping setup, but with configdrive added
+        # It's necessary to check whether the architecture is power,
+        # because power doesn't support ide, so libvirt translates
+        # all ide calls to scsi
+
+ self.flags(force_config_drive=True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+        # The last device is selected for this. On x86 it is the last
+        # ide device (hdd). Since power only supports scsi, the last
+        # device is sdz
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_cdrom_configdrive(self):
+ # A simple disk mapping setup, with configdrive added as cdrom
+        # It's necessary to check whether the architecture is power,
+        # because power doesn't support ide, so libvirt translates
+        # all ide calls to scsi
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_format='iso9660')
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_disk_configdrive(self):
+ # A simple disk mapping setup, with configdrive added as disk
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_format='vfat')
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_ephemeral(self):
+ # A disk mapping with ephemeral devices
+ self.test_instance['system_metadata']['instance_type_swap'] = 5
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'ephemerals': [
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 10},
+ {'device_type': 'floppy',
+ 'device_name': '/dev/vdd', 'size': 10},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_custom_swap(self):
+ # A disk mapping with a swap device at position vdb. This
+ # should cause disk.local to be removed
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'swap': {'device_name': '/dev/vdb',
+ 'swap_size': 10},
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_root(self):
+ # A disk mapping with a blockdev replacing the default root
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 0,
+ 'device_type': 'disk',
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_eph(self):
+ # A disk mapping with a blockdev replacing the ephemeral device
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'boot_index': -1,
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_many(self):
+ # A disk mapping with a blockdev replacing all devices
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 0,
+ 'disk_bus': 'scsi',
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'boot_index': -1,
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdc",
+ 'boot_index': -1,
+ 'device_type': 'cdrom',
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'},
+ 'root': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_complex(self):
+ # The strangest possible disk mapping setup
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vdf',
+ 'swap': {'device_name': '/dev/vdy',
+ 'swap_size': 10},
+ 'ephemerals': [
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 10},
+ ],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 1,
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '2'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_updates_original(self):
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vda',
+ 'swap': {'device_name': '/dev/vdb',
+ 'device_type': 'really_lame_type',
+ 'swap_size': 10},
+ 'ephemerals': [{'disk_bus': 'no_such_bus',
+ 'device_type': 'yeah_right',
+ 'device_name': '/dev/vdc', 'size': 10}],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': None,
+ 'device_type': 'lawnmower',
+ 'delete_on_termination': True}]
+ }
+ expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
+ 'device_type': 'disk', 'swap_size': 10}
+ expected_ephemeral = {'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'device_name': '/dev/vdc', 'size': 10}
+ expected_bdm = {'connection_info': "fake",
+ 'mount_device': '/dev/vdd',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True}
+
+ blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide", block_device_info)
+
+ self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_ephemeral,
+ block_device_info['ephemerals'][0])
+ self.assertEqual(expected_bdm,
+ block_device_info['block_device_mapping'][0])
+
+ def test_get_disk_bus(self):
+ expected = (
+ (arch.X86_64, 'disk', 'virtio'),
+ (arch.X86_64, 'cdrom', 'ide'),
+ (arch.X86_64, 'floppy', 'fdc'),
+ (arch.PPC, 'disk', 'virtio'),
+ (arch.PPC, 'cdrom', 'scsi'),
+ (arch.PPC64, 'disk', 'virtio'),
+ (arch.PPC64, 'cdrom', 'scsi')
+ )
+ for guestarch, dev, res in expected:
+ with mock.patch.object(blockinfo.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ device_type=dev)
+ self.assertEqual(res, bus)
+
+ expected = (
+ ('scsi', None, 'disk', 'scsi'),
+ (None, 'scsi', 'cdrom', 'scsi'),
+ ('usb', None, 'disk', 'usb')
+ )
+ for dbus, cbus, dev, res in expected:
+ image_meta = {'properties': {'hw_disk_bus': dbus,
+ 'hw_cdrom_bus': cbus}}
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ image_meta,
+ device_type=dev)
+ self.assertEqual(res, bus)
+
+ image_meta = {'properties': {'hw_disk_bus': 'xen'}}
+ self.assertRaises(exception.UnsupportedHardware,
+ blockinfo.get_disk_bus_for_device_type,
+ 'kvm',
+ image_meta)
+
+ def test_success_get_disk_bus_for_disk_dev(self):
+ expected = (
+ ('ide', ("kvm", "hda")),
+ ('scsi', ("kvm", "sdf")),
+ ('virtio', ("kvm", "vds")),
+ ('fdc', ("kvm", "fdc")),
+ ('uml', ("kvm", "ubd")),
+ ('xen', ("xen", "sdf")),
+ ('xen', ("xen", "xvdb"))
+ )
+ for res, args in expected:
+ self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args))
+
+ def test_fail_get_disk_bus_for_disk_dev(self):
+ self.assertRaises(exception.NovaException,
+ blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val')
+
+ def test_get_config_drive_type_default(self):
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('cdrom', config_drive_type)
+
+ def test_get_config_drive_type_cdrom(self):
+ self.flags(config_drive_format='iso9660')
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('cdrom', config_drive_type)
+
+ def test_get_config_drive_type_disk(self):
+ self.flags(config_drive_format='vfat')
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('disk', config_drive_type)
+
+ def test_get_config_drive_type_improper_value(self):
+ self.flags(config_drive_format='test')
+ self.assertRaises(exception.ConfigDriveUnknownFormat,
+ blockinfo.get_config_drive_type)
+
+ def test_get_info_from_bdm(self):
+ bdms = [{'device_name': '/dev/vds', 'device_type': 'disk',
+ 'disk_bus': 'usb', 'swap_size': 4},
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 2},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 3},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/sdr",
+ 'disk_bus': 'lame_bus',
+ 'device_type': 'cdrom',
+ 'boot_index': 0,
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdo",
+ 'disk_bus': 'scsi',
+ 'boot_index': 1,
+ 'device_type': 'lame_type',
+ 'delete_on_termination': True}]
+ expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
+ {'dev': 'vdb', 'type': 'disk',
+ 'bus': 'virtio', 'format': 'ext3'},
+ {'dev': 'vdc', 'type': 'disk', 'bus': 'ide'},
+ {'dev': 'sdr', 'type': 'cdrom',
+ 'bus': 'scsi', 'boot_index': '1'},
+ {'dev': 'vdo', 'type': 'disk',
+ 'bus': 'scsi', 'boot_index': '2'}]
+
+ for bdm, expected in zip(bdms, expected):
+ self.assertEqual(expected,
+ blockinfo.get_info_from_bdm('kvm', bdm, {}))
+
+ # Test that passed bus and type are considered
+ bdm = {'device_name': '/dev/vda'}
+ expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'}
+ self.assertEqual(
+ expected, blockinfo.get_info_from_bdm('kvm', bdm, {},
+ disk_bus='ide',
+ dev_type='disk'))
+
+ # Test that lame bus values are defaulted properly
+ bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'}
+ with mock.patch.object(blockinfo,
+ 'get_disk_bus_for_device_type',
+ return_value='ide') as get_bus:
+ blockinfo.get_info_from_bdm('kvm', bdm, {})
+ get_bus.assert_called_once_with('kvm', None, 'cdrom')
+
+ # Test that missing device is defaulted as expected
+ bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'}
+ expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'}
+ mapping = {'root': {'dev': 'vda'}}
+ with mock.patch.object(blockinfo,
+ 'find_disk_dev_for_disk_bus',
+ return_value='vdd') as find_dev:
+ got = blockinfo.get_info_from_bdm(
+ 'kvm', bdm, mapping, assigned_devices=['vdb', 'vdc'])
+ find_dev.assert_called_once_with(
+ {'root': {'dev': 'vda'},
+ 'vdb': {'dev': 'vdb'},
+ 'vdc': {'dev': 'vdc'}}, 'ide')
+ self.assertEqual(expected, got)
+
+ def test_get_device_name(self):
+ bdm_obj = objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0}))
+ self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj))
+
+ driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
+ self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm))
+
+ bdm_obj.device_name = None
+ self.assertIsNone(blockinfo.get_device_name(bdm_obj))
+
+ driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
+ self.assertIsNone(blockinfo.get_device_name(driver_bdm))
+
+ @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus',
+ return_value='vda')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev',
+ return_value='virtio')
+ def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev):
+ blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide')
+ mock_find_dev.assert_called_once_with({}, 'virtio')
+
+ blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide',
+ root_device_name='/dev/vda')
+ mock_get_bus.assert_called_once_with('kvm', '/dev/vda')
+
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ def test_get_root_info_bdm(self, mock_get_info):
+ root_bdm = {'mount_device': '/dev/vda',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk'}
+ # No root_device_name
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide')
+ mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
+ mock_get_info.reset_mock()
+ # Both device names
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
+ root_device_name='sda')
+ mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
+ mock_get_info.reset_mock()
+ # Missing device names
+ del root_bdm['mount_device']
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
+ root_device_name='sda')
+ mock_get_info.assert_called_once_with('kvm',
+ {'device_name': 'sda',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk'},
+ {}, 'virtio')
+
+ def test_get_boot_order_simple(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+ def test_get_boot_order_complex(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/hda': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '3'},
+ '/dev/fda': {'bus': 'fdc', 'dev': 'fda',
+ 'type': 'floppy', 'boot_index': '2'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd', 'fd', 'cdrom']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+ def test_get_boot_order_overlapping(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'boot_index': '2'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc',
+ 'type': 'cdrom', 'boot_index': '3'},
+ 'root': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd', 'cdrom']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+
+class DefaultDeviceNamesTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(DefaultDeviceNamesTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': 2}
+ self.root_device_name = '/dev/vda'
+ self.virt_type = 'kvm'
+ self.flavor = {'swap': 4}
+ self.patchers = []
+ self.patchers.append(mock.patch('nova.compute.flavors.extract_flavor',
+ return_value=self.flavor))
+ self.patchers.append(mock.patch(
+ 'nova.objects.block_device.BlockDeviceMapping.save'))
+ for patcher in self.patchers:
+ patcher.start()
+
+ self.ephemerals = [objects.BlockDeviceMapping(
+ self.context, **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 1,
+ 'boot_index': -1}))]
+
+ self.swap = [objects.BlockDeviceMapping(
+ self.context, **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'volume_size': 1,
+ 'boot_index': -1}))]
+
+ self.block_device_mapping = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdd',
+ 'source_type': 'snapshot',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vde',
+ 'source_type': 'blank',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))]
+
+ def tearDown(self):
+ super(DefaultDeviceNamesTestCase, self).tearDown()
+ for patcher in self.patchers:
+ patcher.stop()
+
+ def _test_default_device_names(self, *block_device_lists):
+ blockinfo.default_device_names(self.virt_type,
+ self.context,
+ self.instance,
+ self.root_device_name,
+ *block_device_lists)
+
+ def test_only_block_device_mapping(self):
+ # Test no-op
+ original_bdm = copy.deepcopy(self.block_device_mapping)
+ self._test_default_device_names([], [], self.block_device_mapping)
+ for original, defaulted in zip(
+ original_bdm, self.block_device_mapping):
+ self.assertEqual(original.device_name, defaulted.device_name)
+
+ # Assert it defaults the missing one as expected
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], [], self.block_device_mapping)
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_ephemerals(self):
+ # Test ephemeral gets assigned
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_swap(self):
+ # Test swap only
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names([], self.swap, [])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test swap and block_device_mapping
+ self.swap[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], self.swap,
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_all_together(self):
+ # Test swap missing
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test swap and eph missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test all missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
new file mode 100644
index 0000000000..192d075640
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -0,0 +1,2344 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.utils import units
+
+from nova.compute import arch
+from nova import test
+from nova.tests.unit import matchers
+from nova.virt.libvirt import config
+
+
class LibvirtConfigBaseTest(test.NoDBTestCase):
    """Common base for libvirt config tests: adds an XML equality assert."""

    def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
        """Assert that two XML strings are semantically equivalent."""
        matcher = matchers.XMLMatches(expectedXmlstr)
        self.assertThat(actualXmlstr, matcher)
+
+
class LibvirtConfigTest(LibvirtConfigBaseTest):
    """Tests for the LibvirtConfigObject base serialization helpers."""

    def test_config_plain(self):
        # A bare object serializes to a single empty root element.
        obj = config.LibvirtConfigObject(root_name="demo")
        xml = obj.to_xml()

        self.assertXmlEqual(xml, "<demo/>")

    def test_config_ns(self):
        # Root element carries the XML namespace prefix/URI when given.
        obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo",
                                         ns_uri="http://example.com/foo")
        xml = obj.to_xml()

        self.assertXmlEqual(xml, """
            <foo:demo xmlns:foo="http://example.com/foo"/>""")

    def test_config_text(self):
        # _text_node builds a child element containing text.
        obj = config.LibvirtConfigObject(root_name="demo")
        root = obj.format_dom()
        root.append(obj._text_node("foo", "bar"))

        xml = etree.tostring(root)
        self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>")

    def test_config_text_unicode(self):
        # Non-ASCII text is serialized as character references by lxml.
        obj = config.LibvirtConfigObject(root_name='demo')
        root = obj.format_dom()
        root.append(obj._text_node('foo', u'\xF0\x9F\x92\xA9'))
        self.assertXmlEqual('<demo><foo>&#240;&#159;&#146;&#169;</foo></demo>',
                            etree.tostring(root))

    def test_config_parse(self):
        # parse_str must accept well-formed XML without raising.
        inxml = "<demo><foo/></demo>"
        obj = config.LibvirtConfigObject(root_name="demo")
        obj.parse_str(inxml)
+
+
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
    """Round-trip test for parsing libvirt <capabilities> XML."""

    def test_config_host(self):
        # Representative capabilities document: host CPU description,
        # two NUMA cells with per-size page counts, and two guest archs.
        xmlin = """
        <capabilities>
          <host>
            <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
            <cpu>
              <arch>x86_64</arch>
              <model>Opteron_G3</model>
              <vendor>AMD</vendor>
              <topology sockets='1' cores='4' threads='1'/>
              <feature name='ibs'/>
              <feature name='osvw'/>
            </cpu>
            <topology>
              <cells num='2'>
                <cell id='0'>
                  <memory unit='KiB'>4048280</memory>
                  <pages unit='KiB' size='4'>1011941</pages>
                  <pages unit='KiB' size='2048'>0</pages>
                  <cpus num='4'>
                    <cpu id='0' socket_id='0' core_id='0' siblings='0'/>
                    <cpu id='1' socket_id='0' core_id='1' siblings='1'/>
                    <cpu id='2' socket_id='0' core_id='2' siblings='2'/>
                    <cpu id='3' socket_id='0' core_id='3' siblings='3'/>
                  </cpus>
                </cell>
                <cell id='1'>
                  <memory unit='KiB'>4127684</memory>
                  <pages unit='KiB' size='4'>1031921</pages>
                  <pages unit='KiB' size='2048'>0</pages>
                  <cpus num='4'>
                    <cpu id='4' socket_id='1' core_id='0' siblings='4'/>
                    <cpu id='5' socket_id='1' core_id='1' siblings='5'/>
                    <cpu id='6' socket_id='1' core_id='2' siblings='6'/>
                    <cpu id='7' socket_id='1' core_id='3' siblings='7'/>
                  </cpus>
                </cell>
              </cells>
            </topology>
          </host>
          <guest>
            <os_type>hvm</os_type>
            <arch name='x86_64'/>
          </guest>
          <guest>
            <os_type>hvm</os_type>
            <arch name='i686'/>
          </guest>
        </capabilities>"""

        obj = config.LibvirtConfigCaps()
        obj.parse_str(xmlin)

        # Parsed host section becomes a typed sub-object.
        self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost)
        self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")

        xmlout = obj.to_xml()

        # Serializing back must reproduce the same document.
        self.assertXmlEqual(xmlin, xmlout)
+
+
class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
    """Tests for the guest <timer> element serialization."""

    def test_config_platform(self):
        # Default timer name is "platform"; only "track" is set here.
        obj = config.LibvirtConfigGuestTimer()
        obj.track = "host"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <timer name="platform" track="host"/>
        """)

    def test_config_pit(self):
        obj = config.LibvirtConfigGuestTimer()
        obj.name = "pit"
        obj.tickpolicy = "discard"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <timer name="pit" tickpolicy="discard"/>
        """)

    def test_config_hpet(self):
        # Boolean "present" serializes as yes/no.
        obj = config.LibvirtConfigGuestTimer()
        obj.name = "hpet"
        obj.present = False

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <timer name="hpet" present="no"/>
        """)
+
+
class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
    """Tests for the guest <clock> element and its child timers."""

    def test_config_utc(self):
        # Default offset is "utc".
        obj = config.LibvirtConfigGuestClock()

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <clock offset="utc"/>
        """)

    def test_config_localtime(self):
        obj = config.LibvirtConfigGuestClock()
        obj.offset = "localtime"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <clock offset="localtime"/>
        """)

    def test_config_timezone(self):
        # "timezone" offset adds a timezone attribute.
        obj = config.LibvirtConfigGuestClock()
        obj.offset = "timezone"
        obj.timezone = "EDT"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <clock offset="timezone" timezone="EDT"/>
        """)

    def test_config_variable(self):
        # "variable" offset adds an adjustment attribute.
        obj = config.LibvirtConfigGuestClock()
        obj.offset = "variable"
        obj.adjustment = "123456"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <clock offset="variable" adjustment="123456"/>
        """)

    def test_config_timers(self):
        # Timers added via add_timer() become child <timer> elements.
        obj = config.LibvirtConfigGuestClock()

        tmpit = config.LibvirtConfigGuestTimer()
        tmpit.name = "pit"
        tmpit.tickpolicy = "discard"

        tmrtc = config.LibvirtConfigGuestTimer()
        tmrtc.name = "rtc"
        tmrtc.tickpolicy = "merge"

        obj.add_timer(tmpit)
        obj.add_timer(tmrtc)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <clock offset="utc">
              <timer name="pit" tickpolicy="discard"/>
              <timer name="rtc" tickpolicy="merge"/>
            </clock>
        """)
+
+
class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
    """Tests for serializing a host CPU <feature> element."""

    def test_config_simple(self):
        feature = config.LibvirtConfigCPUFeature("mtrr")

        generated = feature.to_xml()
        self.assertXmlEqual(generated, """
            <feature name="mtrr"/>
        """)
+
+
class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
    """Tests for serializing a guest CPU <feature> element with a policy."""

    def test_config_simple(self):
        feature = config.LibvirtConfigGuestCPUFeature("mtrr")
        feature.policy = "force"

        generated = feature.to_xml()
        self.assertXmlEqual(generated, """
            <feature name="mtrr" policy="force"/>
        """)
+
+
class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest):
    """Tests for the guest <numa> topology element."""

    def test_parse_dom(self):
        # Each <cell> becomes an entry in obj.cells.
        xml = """
            <numa>
              <cell id="0" cpus="0-1" memory="1000000"/>
              <cell id="1" cpus="2-3" memory="1500000"/>
            </numa>
        """
        xmldoc = etree.fromstring(xml)
        obj = config.LibvirtConfigGuestCPUNUMA()
        obj.parse_dom(xmldoc)

        self.assertEqual(2, len(obj.cells))

    def test_config_simple(self):
        # cpus sets are rendered as a compact range string (e.g. "0-1").
        obj = config.LibvirtConfigGuestCPUNUMA()

        cell = config.LibvirtConfigGuestCPUNUMACell()
        cell.id = 0
        cell.cpus = set([0, 1])
        cell.memory = 1000000

        obj.cells.append(cell)

        cell = config.LibvirtConfigGuestCPUNUMACell()
        cell.id = 1
        cell.cpus = set([2, 3])
        cell.memory = 1500000

        obj.cells.append(cell)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <numa>
              <cell id="0" cpus="0-1" memory="1000000"/>
              <cell id="1" cpus="2-3" memory="1500000"/>
            </numa>
        """)
+
+
class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
    """Tests for serializing the host-capability <cpu> element."""

    def test_config_simple(self):
        obj = config.LibvirtConfigCPU()
        obj.model = "Penryn"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu>
              <model>Penryn</model>
            </cpu>
        """)

    def test_config_complex(self):
        # Features are emitted sorted by name regardless of add order.
        obj = config.LibvirtConfigCPU()
        obj.model = "Penryn"
        obj.vendor = "Intel"
        obj.arch = arch.X86_64

        obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
        obj.add_feature(config.LibvirtConfigCPUFeature("apic"))

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu>
              <arch>x86_64</arch>
              <model>Penryn</model>
              <vendor>Intel</vendor>
              <feature name="apic"/>
              <feature name="mtrr"/>
            </cpu>
        """)

    def test_only_uniq_cpu_features(self):
        # NOTE: renamed from "test_only_uniq_cpu_featues" (typo).  Test
        # discovery only requires the "test_" prefix, so the rename is
        # safe for all runners.
        # Duplicate add_feature() calls must be de-duplicated on output.
        obj = config.LibvirtConfigCPU()
        obj.model = "Penryn"
        obj.vendor = "Intel"
        obj.arch = arch.X86_64

        obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
        obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
        obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
        obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu>
              <arch>x86_64</arch>
              <model>Penryn</model>
              <vendor>Intel</vendor>
              <feature name="apic"/>
              <feature name="mtrr"/>
            </cpu>
        """)

    def test_config_topology(self):
        obj = config.LibvirtConfigCPU()
        obj.model = "Penryn"
        obj.sockets = 4
        obj.cores = 4
        obj.threads = 2

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu>
              <model>Penryn</model>
              <topology sockets="4" cores="4" threads="2"/>
            </cpu>
        """)
+
+
class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
    """Tests for the guest <cpu> element (mode/match/features/NUMA)."""

    def test_config_simple(self):
        # Guest CPU defaults to match="exact".
        obj = config.LibvirtConfigGuestCPU()
        obj.model = "Penryn"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu match="exact">
              <model>Penryn</model>
            </cpu>
        """)

    def test_config_complex(self):
        # Guest CPU features default to policy="require".
        obj = config.LibvirtConfigGuestCPU()
        obj.model = "Penryn"
        obj.vendor = "Intel"
        obj.arch = arch.X86_64
        obj.mode = "custom"

        obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
        obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu mode="custom" match="exact">
              <arch>x86_64</arch>
              <model>Penryn</model>
              <vendor>Intel</vendor>
              <feature name="apic" policy="require"/>
              <feature name="mtrr" policy="require"/>
            </cpu>
        """)

    def test_config_host(self):
        obj = config.LibvirtConfigGuestCPU()
        obj.mode = "host-model"
        obj.match = "exact"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu mode="host-model" match="exact"/>
        """)

    def test_config_host_with_numa(self):
        # A NUMA topology attached via obj.numa nests under <cpu>.
        obj = config.LibvirtConfigGuestCPU()
        obj.mode = "host-model"
        obj.match = "exact"

        numa = config.LibvirtConfigGuestCPUNUMA()

        cell = config.LibvirtConfigGuestCPUNUMACell()
        cell.id = 0
        cell.cpus = set([0, 1])
        cell.memory = 1000000

        numa.cells.append(cell)

        cell = config.LibvirtConfigGuestCPUNUMACell()
        cell.id = 1
        cell.cpus = set([2, 3])
        cell.memory = 1500000

        numa.cells.append(cell)

        obj.numa = numa

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <cpu mode="host-model" match="exact">
              <numa>
                <cell id="0" cpus="0-1" memory="1000000"/>
                <cell id="1" cpus="2-3" memory="1500000"/>
              </numa>
            </cpu>
        """)
+
+
class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
    """Tests for the guest <smbios> element defaults."""

    def test_config_simple(self):
        smbios = config.LibvirtConfigGuestSMBIOS()

        generated = smbios.to_xml()
        self.assertXmlEqual(generated, """
            <smbios mode="sysinfo"/>
        """)
+
+
class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
    """Tests for the guest <sysinfo type="smbios"> element."""

    def test_config_simple(self):
        # No attributes set -> empty sysinfo element.
        obj = config.LibvirtConfigGuestSysinfo()

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <sysinfo type="smbios"/>
        """)

    def test_config_bios(self):
        # bios_* attributes populate the <bios> section.
        obj = config.LibvirtConfigGuestSysinfo()
        obj.bios_vendor = "Acme"
        obj.bios_version = "6.6.6"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <sysinfo type="smbios">
              <bios>
                <entry name="vendor">Acme</entry>
                <entry name="version">6.6.6</entry>
              </bios>
            </sysinfo>
        """)

    def test_config_system(self):
        # system_* attributes populate the <system> section.
        obj = config.LibvirtConfigGuestSysinfo()
        obj.system_manufacturer = "Acme"
        obj.system_product = "Wile Coyote"
        obj.system_version = "6.6.6"
        obj.system_serial = "123456"
        obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <sysinfo type="smbios">
              <system>
                <entry name="manufacturer">Acme</entry>
                <entry name="product">Wile Coyote</entry>
                <entry name="version">6.6.6</entry>
                <entry name="serial">123456</entry>
                <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
              </system>
            </sysinfo>
        """)

    def test_config_mixed(self):
        # Both sections appear when bios and system attrs are mixed.
        obj = config.LibvirtConfigGuestSysinfo()
        obj.bios_vendor = "Acme"
        obj.system_manufacturer = "Acme"
        obj.system_product = "Wile Coyote"
        obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <sysinfo type="smbios">
              <bios>
                <entry name="vendor">Acme</entry>
              </bios>
              <system>
                <entry name="manufacturer">Acme</entry>
                <entry name="product">Wile Coyote</entry>
                <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
              </system>
            </sysinfo>
        """)
+
+
class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
    """Tests for the guest <disk> element: file/block/network sources,
    serial numbers, discard, auth, iotune and blockio round-trips.
    """

    def test_config_file(self):
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="file" device="disk">
              <source file="/tmp/hello"/>
              <target bus="ide" dev="/dev/hda"/>
            </disk>""")

    def test_config_file_parse(self):
        xml = """<disk type="file" device="disk">
                   <source file="/tmp/hello"/>
                   <target bus="ide" dev="/dev/hda"/>
                 </disk>"""
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.source_type, 'file')
        self.assertEqual(obj.source_path, '/tmp/hello')
        self.assertEqual(obj.target_dev, '/dev/hda')
        self.assertEqual(obj.target_bus, 'ide')

    def test_config_file_serial(self):
        # A serial attribute becomes a <serial> child element.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="file" device="disk">
              <source file="/tmp/hello"/>
              <target bus="ide" dev="/dev/hda"/>
              <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
            </disk>""")

    def test_config_file_serial_parse(self):
        xml = """<disk type="file" device="disk">
                   <source file="/tmp/hello"/>
                   <target bus="ide" dev="/dev/hda"/>
                   <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
                 </disk>"""
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.source_type, 'file')
        self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9')

    def test_config_file_discard(self):
        # driver_discard adds discard="unmap" to the <driver> element.
        obj = config.LibvirtConfigGuestDisk()
        obj.driver_name = "qemu"
        obj.driver_format = "qcow2"
        obj.driver_cache = "none"
        obj.driver_discard = "unmap"
        obj.source_type = "file"
        obj.source_path = "/tmp/hello.qcow2"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"

        xml = obj.to_xml()
        self.assertXmlEqual("""
            <disk type="file" device="disk">
              <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
              <source file="/tmp/hello.qcow2"/>
              <target bus="ide" dev="/dev/hda"/>
              <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
            </disk>""", xml)

    def test_config_file_discard_parse(self):
        xml = """
            <disk type="file" device="disk">
              <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
              <source file="/tmp/hello.qcow2"/>
              <target bus="ide" dev="/dev/hda"/>
              <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
            </disk>"""
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)

        self.assertEqual('unmap', obj.driver_discard)

    def test_config_block(self):
        # Block sources use a <source dev=...> attribute, not file=.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "block"
        obj.source_path = "/tmp/hello"
        obj.source_device = "cdrom"
        obj.driver_name = "qemu"
        obj.target_dev = "/dev/hdc"
        obj.target_bus = "ide"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="block" device="cdrom">
              <driver name="qemu"/>
              <source dev="/tmp/hello"/>
              <target bus="ide" dev="/dev/hdc"/>
            </disk>""")

    def test_config_block_parse(self):
        xml = """<disk type="block" device="cdrom">
                   <driver name="qemu"/>
                   <source dev="/tmp/hello"/>
                   <target bus="ide" dev="/dev/hdc"/>
                 </disk>"""
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.source_type, 'block')
        self.assertEqual(obj.source_path, '/tmp/hello')
        self.assertEqual(obj.target_dev, '/dev/hdc')
        self.assertEqual(obj.target_bus, 'ide')

    def test_config_network(self):
        # Network sources carry protocol and name attributes.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "network"
        obj.source_protocol = "iscsi"
        obj.source_name = "foo.bar.com"
        obj.driver_name = "qemu"
        obj.driver_format = "qcow2"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="network" device="disk">
              <driver name="qemu" type="qcow2"/>
              <source name="foo.bar.com" protocol="iscsi"/>
              <target bus="ide" dev="/dev/hda"/>
            </disk>""")

    def test_config_network_parse(self):
        xml = """<disk type="network" device="disk">
                   <driver name="qemu" type="qcow2"/>
                   <source name="foo.bar.com" protocol="iscsi"/>
                   <target bus="ide" dev="/dev/hda"/>
                 </disk>"""
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.source_type, 'network')
        self.assertEqual(obj.source_protocol, 'iscsi')
        self.assertEqual(obj.source_name, 'foo.bar.com')
        self.assertEqual(obj.driver_name, 'qemu')
        self.assertEqual(obj.driver_format, 'qcow2')
        self.assertEqual(obj.target_dev, '/dev/hda')
        self.assertEqual(obj.target_bus, 'ide')

    def test_config_network_no_name(self):
        # Without source_name, hosts are emitted as <host> children and
        # a None port produces a <host> with no port attribute.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = 'network'
        obj.source_protocol = 'nbd'
        obj.source_hosts = ['foo.bar.com']
        obj.source_ports = [None]
        obj.driver_name = 'qemu'
        obj.driver_format = 'raw'
        obj.target_dev = '/dev/vda'
        obj.target_bus = 'virtio'

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="network" device="disk">
              <driver name="qemu" type="raw"/>
              <source protocol="nbd">
                <host name="foo.bar.com"/>
              </source>
              <target bus="virtio" dev="/dev/vda"/>
            </disk>""")

    def test_config_network_multihost(self):
        # source_hosts and source_ports are zipped pairwise.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = 'network'
        obj.source_protocol = 'rbd'
        obj.source_name = 'pool/image'
        obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4']
        obj.source_ports = [None, '123', '456']
        obj.driver_name = 'qemu'
        obj.driver_format = 'raw'
        obj.target_dev = '/dev/vda'
        obj.target_bus = 'virtio'

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="network" device="disk">
              <driver name="qemu" type="raw"/>
              <source name="pool/image" protocol="rbd">
                <host name="foo.bar.com"/>
                <host name="::1" port="123"/>
                <host name="1.2.3.4" port="456"/>
              </source>
              <target bus="virtio" dev="/dev/vda"/>
            </disk>""")

    def test_config_network_auth(self):
        # auth_* attributes produce the <auth> element with a <secret>.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "network"
        obj.source_protocol = "rbd"
        obj.source_name = "pool/image"
        obj.driver_name = "qemu"
        obj.driver_format = "raw"
        obj.target_dev = "/dev/vda"
        obj.target_bus = "virtio"
        obj.auth_username = "foo"
        obj.auth_secret_type = "ceph"
        obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="network" device="disk">
              <driver name="qemu" type="raw"/>
              <source name="pool/image" protocol="rbd"/>
              <auth username="foo">
                <secret type="ceph"
                        uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
              </auth>
              <target bus="virtio" dev="/dev/vda"/>
            </disk>""")

    def test_config_iotune(self):
        # disk_* rate attributes populate the <iotune> element.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        obj.disk_read_bytes_sec = 1024000
        obj.disk_read_iops_sec = 1000
        obj.disk_total_bytes_sec = 2048000
        obj.disk_write_bytes_sec = 1024000
        obj.disk_write_iops_sec = 1000
        obj.disk_total_iops_sec = 2000

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="file" device="disk">
              <source file="/tmp/hello"/>
              <target bus="ide" dev="/dev/hda"/>
              <iotune>
                <read_bytes_sec>1024000</read_bytes_sec>
                <read_iops_sec>1000</read_iops_sec>
                <write_bytes_sec>1024000</write_bytes_sec>
                <write_iops_sec>1000</write_iops_sec>
                <total_bytes_sec>2048000</total_bytes_sec>
                <total_iops_sec>2000</total_iops_sec>
              </iotune>
            </disk>""")

    def test_config_blockio(self):
        # Block size attributes populate the <blockio> element.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"
        obj.logical_block_size = "4096"
        obj.physical_block_size = "4096"

        xml = obj.to_xml()
        self.assertXmlEqual("""
            <disk type="file" device="disk">
              <source file="/tmp/hello"/>
              <target bus="ide" dev="/dev/hda"/>
              <blockio logical_block_size="4096" physical_block_size="4096"/>
            </disk>""", xml)
+
+
class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest):
    """Round-trip tests for the disk element used in snapshots.

    NOTE(review): these tests instantiate config.LibvirtConfigGuestDisk,
    not a snapshot-specific class, and duplicate the basic cases in
    LibvirtConfigGuestDiskTest — presumably intended to exercise
    LibvirtConfigGuestSnapshotDisk; confirm against the config module.
    """

    def test_config_file(self):
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "file"
        obj.source_path = "/tmp/hello"
        obj.target_dev = "/dev/hda"
        obj.target_bus = "ide"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="file" device="disk">
              <source file="/tmp/hello"/>
              <target bus="ide" dev="/dev/hda"/>
            </disk>""")

    def test_config_file_parse(self):
        xml = """<disk type="file" device="disk">
                   <source file="/tmp/hello"/>
                   <target bus="ide" dev="/dev/hda"/>
                 </disk>"""
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDisk()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.source_type, 'file')
        self.assertEqual(obj.source_path, '/tmp/hello')
        self.assertEqual(obj.target_dev, '/dev/hda')
        self.assertEqual(obj.target_bus, 'ide')
+
+
class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
    """Tests for parsing nested <backingStore> chains."""

    def test_config_file_parse(self):
        # Two-deep file chain; the empty inner <backingStore/> ends the
        # chain, so the innermost backing_store is None.
        xml = """<backingStore type='file'>
                   <driver name='qemu' type='qcow2'/>
                   <source file='/var/lib/libvirt/images/mid.qcow2'/>
                   <backingStore type='file'>
                     <driver name='qemu' type='qcow2'/>
                     <source file='/var/lib/libvirt/images/base.qcow2'/>
                     <backingStore/>
                   </backingStore>
                 </backingStore>
              """
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDiskBackingStore()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.driver_name, 'qemu')
        self.assertEqual(obj.driver_format, 'qcow2')
        self.assertEqual(obj.source_type, 'file')
        self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
        self.assertEqual(obj.backing_store.driver_name, 'qemu')
        self.assertEqual(obj.backing_store.source_type, 'file')
        self.assertEqual(obj.backing_store.source_file,
                         '/var/lib/libvirt/images/base.qcow2')
        self.assertIsNone(obj.backing_store.backing_store)

    def test_config_network_parse(self):
        # Network (gluster) chain: protocol, volume name, host and the
        # per-layer index attribute are all preserved.
        xml = """<backingStore type='network' index='1'>
                   <format type='qcow2'/>
                   <source protocol='gluster' name='volume1/img1'>
                     <host name='host1' port='24007'/>
                   </source>
                   <backingStore type='network' index='2'>
                     <format type='qcow2'/>
                     <source protocol='gluster' name='volume1/img2'>
                       <host name='host1' port='24007'/>
                     </source>
                     <backingStore/>
                   </backingStore>
                 </backingStore>
              """
        xmldoc = etree.fromstring(xml)

        obj = config.LibvirtConfigGuestDiskBackingStore()
        obj.parse_dom(xmldoc)

        self.assertEqual(obj.source_type, 'network')
        self.assertEqual(obj.source_protocol, 'gluster')
        self.assertEqual(obj.source_name, 'volume1/img1')
        self.assertEqual(obj.source_hosts[0], 'host1')
        self.assertEqual(obj.source_ports[0], '24007')
        self.assertEqual(obj.index, '1')
        self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
        self.assertEqual(obj.backing_store.index, '2')
        self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
        self.assertEqual(obj.backing_store.source_ports[0], '24007')
        self.assertIsNone(obj.backing_store.backing_store)
+
+
class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
    """Tests for the guest <filesystem> device element."""

    def test_config_mount(self):
        fs = config.LibvirtConfigGuestFilesys()
        fs.source_type = "mount"
        fs.source_dir = "/tmp/hello"
        fs.target_dir = "/mnt"

        generated = fs.to_xml()
        self.assertXmlEqual(generated, """
            <filesystem type="mount">
              <source dir="/tmp/hello"/>
              <target dir="/mnt"/>
            </filesystem>""")
+
+
class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
    """Tests for the guest <input> device element defaults."""

    def test_config_tablet(self):
        device = config.LibvirtConfigGuestInput()

        generated = device.to_xml()
        self.assertXmlEqual(generated, """
            <input type="tablet" bus="usb"/>""")
+
+
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
    """Tests for the guest <graphics> element serialization."""

    def test_config_graphics(self):
        graphics = config.LibvirtConfigGuestGraphics()
        graphics.type = "vnc"
        graphics.autoport = True
        graphics.keymap = "en_US"
        graphics.listen = "127.0.0.1"

        generated = graphics.to_xml()
        self.assertXmlEqual(generated, """
  <graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
                             """)
+
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
    """Tests for the generic <hostdev> element (serialize and parse)."""

    def test_config_pci_guest_host_dev(self):
        # PCI hostdevs default to managed="yes".
        obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
        xml = obj.to_xml()
        expected = """
            <hostdev mode="subsystem" type="pci" managed="yes"/>
            """
        self.assertXmlEqual(xml, expected)

    def test_parse_GuestHostdev(self):
        xmldoc = """<hostdev mode="subsystem" type="pci" managed="yes"/>"""
        obj = config.LibvirtConfigGuestHostdev()
        obj.parse_str(xmldoc)
        self.assertEqual(obj.mode, 'subsystem')
        self.assertEqual(obj.type, 'pci')
        self.assertEqual(obj.managed, 'yes')

    def test_parse_GuestHostdev_non_pci(self):
        # Non-PCI device types parse too, preserving managed="no".
        xmldoc = """<hostdev mode="subsystem" type="usb" managed="no"/>"""
        obj = config.LibvirtConfigGuestHostdev()
        obj.parse_str(xmldoc)
        self.assertEqual(obj.mode, 'subsystem')
        self.assertEqual(obj.type, 'usb')
        self.assertEqual(obj.managed, 'no')
+
+
class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
    """Tests for the PCI-specific <hostdev> element round-trips.

    NOTE: test method names fixed from the original "hosdev" typo to
    "hostdev"; discovery only depends on the "test_" prefix, so the
    rename is safe.
    """

    # Canonical PCI hostdev document shared by serialize and parse tests.
    expected = """
            <hostdev mode="subsystem" type="pci" managed="yes">
                <source>
                    <address bus="0x11" domain="0x1234" function="0x3"
                                        slot="0x22" />
                </source>
            </hostdev>
            """

    def test_config_guest_hostdev_pci(self):
        # Address components set without the 0x prefix gain it on output.
        hostdev = config.LibvirtConfigGuestHostdevPCI()
        hostdev.domain = "1234"
        hostdev.bus = "11"
        hostdev.slot = "22"
        hostdev.function = "3"
        xml = hostdev.to_xml()
        self.assertXmlEqual(self.expected, xml)

    def test_parse_guest_hostdev_pci(self):
        xmldoc = self.expected
        obj = config.LibvirtConfigGuestHostdevPCI()
        obj.parse_str(xmldoc)
        self.assertEqual(obj.mode, 'subsystem')
        self.assertEqual(obj.type, 'pci')
        self.assertEqual(obj.managed, 'yes')
        self.assertEqual(obj.domain, '0x1234')
        self.assertEqual(obj.bus, '0x11')
        self.assertEqual(obj.slot, '0x22')
        self.assertEqual(obj.function, '0x3')

    def test_parse_guest_hostdev_usb(self):
        # A USB hostdev parses without error; only mode/type are checked.
        xmldoc = """<hostdev mode='subsystem' type='usb'>
                      <source startupPolicy='optional'>
                          <vendor id='0x1234'/>
                          <product id='0xbeef'/>
                      </source>
                      <boot order='2'/>
                    </hostdev>"""
        obj = config.LibvirtConfigGuestHostdevPCI()
        obj.parse_str(xmldoc)
        self.assertEqual(obj.mode, 'subsystem')
        self.assertEqual(obj.type, 'usb')
+
+
class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest):
    """Tests for the guest <serial> character device element."""

    def test_config_file(self):
        obj = config.LibvirtConfigGuestSerial()
        obj.type = "file"
        obj.source_path = "/tmp/vm.log"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <serial type="file">
              <source path="/tmp/vm.log"/>
            </serial>""")

    def test_config_serial_port(self):
        # TCP serial ports serialize host/port with mode="bind".
        obj = config.LibvirtConfigGuestSerial()
        obj.type = "tcp"
        obj.listen_port = 11111
        obj.listen_host = "0.0.0.0"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <serial type="tcp">
              <source host="0.0.0.0" service="11111" mode="bind"/>
            </serial>""")
+
+
class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
    """Tests for the guest <console> element."""

    def test_config_pty(self):
        console = config.LibvirtConfigGuestConsole()
        console.type = "pty"

        generated = console.to_xml()
        self.assertXmlEqual(generated, """
            <console type="pty"/>""")
+
+
class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest):
    """Tests for the guest <channel> element (spice and QEMU agent)."""

    def test_config_spice_minimal(self):
        # Target type defaults to virtio.
        obj = config.LibvirtConfigGuestChannel()
        obj.type = "spicevmc"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <channel type="spicevmc">
              <target type='virtio'/>
            </channel>""")

    def test_config_spice_full(self):
        obj = config.LibvirtConfigGuestChannel()
        obj.type = "spicevmc"
        obj.target_name = "com.redhat.spice.0"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <channel type="spicevmc">
              <target type='virtio' name='com.redhat.spice.0'/>
            </channel>""")

    def test_config_qga_full(self):
        # Unix-socket channel as used for the QEMU guest agent; the
        # socket path feeds the <source path=...> element.
        obj = config.LibvirtConfigGuestChannel()
        obj.type = "unix"
        obj.target_name = "org.qemu.guest_agent.0"
        obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % (
                            obj.target_name, "instance-name")

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <channel type="unix">
              <source path="%s" mode="bind"/>
              <target type="virtio" name="org.qemu.guest_agent.0"/>
            </channel>""" % obj.source_path)
+
+
class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
    """Tests for the guest <interface> element: ethernet, bridge, OVS,
    802.1Qbh, macvtap direct and PCI hostdev VIF flavours.
    """

    def test_config_ethernet(self):
        # vif_* rate attributes populate the <bandwidth> element.
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "ethernet"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.model = "virtio"
        obj.target_dev = "vnet0"
        obj.driver_name = "vhost"
        obj.vif_inbound_average = 1024000
        obj.vif_inbound_peak = 10240000
        obj.vif_inbound_burst = 1024000
        obj.vif_outbound_average = 1024000
        obj.vif_outbound_peak = 10240000
        obj.vif_outbound_burst = 1024000

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="ethernet">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <model type="virtio"/>
              <driver name="vhost"/>
              <target dev="vnet0"/>
              <bandwidth>
                <inbound average="1024000" peak="10240000" burst="1024000"/>
                <outbound average="1024000" peak="10240000" burst="1024000"/>
              </bandwidth>
            </interface>""")

    def test_config_bridge(self):
        # filtername/filterparams produce the <filterref> element.
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "bridge"
        obj.source_dev = "br0"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.model = "virtio"
        obj.target_dev = "tap12345678"
        obj.filtername = "clean-traffic"
        obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
        obj.vif_inbound_average = 1024000
        obj.vif_inbound_peak = 10240000
        obj.vif_inbound_burst = 1024000
        obj.vif_outbound_average = 1024000
        obj.vif_outbound_peak = 10240000
        obj.vif_outbound_burst = 1024000

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="bridge">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <model type="virtio"/>
              <source bridge="br0"/>
              <target dev="tap12345678"/>
              <filterref filter="clean-traffic">
                <parameter name="IP" value="192.168.122.1"/>
              </filterref>
              <bandwidth>
                <inbound average="1024000" peak="10240000" burst="1024000"/>
                <outbound average="1024000" peak="10240000" burst="1024000"/>
              </bandwidth>
            </interface>""")

    def test_config_bridge_ovs(self):
        # vporttype/vportparams produce the <virtualport> element.
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "bridge"
        obj.source_dev = "br0"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.model = "virtio"
        obj.target_dev = "tap12345678"
        obj.vporttype = "openvswitch"
        obj.vportparams.append({"key": "instanceid", "value": "foobar"})

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="bridge">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <model type="virtio"/>
              <source bridge="br0"/>
              <target dev="tap12345678"/>
              <virtualport type="openvswitch">
                <parameters instanceid="foobar"/>
              </virtualport>
            </interface>""")

    def test_config_8021Qbh(self):
        # Direct VIFs default to source mode="private".
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "direct"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.model = "virtio"
        obj.target_dev = "tap12345678"
        obj.source_dev = "eth0"
        obj.vporttype = "802.1Qbh"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="direct">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <model type="virtio"/>
              <source dev="eth0" mode="private"/>
              <target dev="tap12345678"/>
              <virtualport type="802.1Qbh"/>
            </interface>""")

    def test_config_direct(self):
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "direct"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.model = "virtio"
        obj.source_dev = "eth0"
        obj.source_mode = "passthrough"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="direct">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <model type="virtio"/>
              <source dev="eth0" mode="passthrough"/>
            </interface>""")

    def test_config_8021Qbh_hostdev(self):
        # hostdev VIFs expand source_dev "DDDD:BB:SS.F" into a PCI
        # <address> element with 0x-prefixed components.
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "hostdev"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.source_dev = "0000:0a:00.1"
        obj.vporttype = "802.1Qbh"
        obj.add_vport_param("profileid", "MyPortProfile")

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="hostdev" managed="yes">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <source>
                <address type="pci" domain="0x0000"
                   bus="0x0a" slot="0x00" function="0x1"/>
              </source>
              <virtualport type="802.1Qbh">
                <parameters profileid="MyPortProfile"/>
              </virtualport>
            </interface>""")

    def test_config_hw_veb_hostdev(self):
        # vlan produces the <vlan><tag id=.../></vlan> element.
        obj = config.LibvirtConfigGuestInterface()
        obj.net_type = "hostdev"
        obj.mac_addr = "DE:AD:BE:EF:CA:FE"
        obj.source_dev = "0000:0a:00.1"
        obj.vlan = "100"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <interface type="hostdev" managed="yes">
              <mac address="DE:AD:BE:EF:CA:FE"/>
              <source>
                <address type="pci" domain="0x0000"
                   bus="0x0a" slot="0x00" function="0x1"/>
              </source>
              <vlan>
                <tag id="100"/>
              </vlan>
            </interface>""")
+
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
    """Tests for LibvirtConfigGuest, the builder for the top-level
    <domain> XML document handed to libvirt.

    Each test programs a guest config object and compares the generated
    XML against a hand-written expected document.
    """

    def test_config_lxc(self):
        # Container guest: os_type "exe" with an <init> path and a
        # mount-type filesystem device instead of a block disk.
        obj = config.LibvirtConfigGuest()
        obj.virt_type = "lxc"
        obj.memory = 100 * units.Mi
        obj.vcpus = 2
        obj.cpuset = set([0, 1, 3, 4, 5])
        obj.name = "demo"
        obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        obj.os_type = "exe"
        obj.os_init_path = "/sbin/init"

        fs = config.LibvirtConfigGuestFilesys()
        fs.source_dir = "/root/lxc"
        fs.target_dir = "/"

        obj.add_device(fs)

        xml = obj.to_xml()
        # The cpuset is rendered as collapsed ranges ("0-1,3-5").
        self.assertXmlEqual(xml, """
            <domain type="lxc">
              <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
              <name>demo</name>
              <memory>104857600</memory>
              <vcpu cpuset="0-1,3-5">2</vcpu>
              <os>
                <type>exe</type>
                <init>/sbin/init</init>
              </os>
              <devices>
                <filesystem type="mount">
                  <source dir="/root/lxc"/>
                  <target dir="/"/>
                </filesystem>
              </devices>
            </domain>""")

    def test_config_lxc_with_idmap(self):
        # Same container guest as above, with uid/gid maps appended; the
        # maps are emitted together in a single <idmap> element.
        obj = config.LibvirtConfigGuest()
        obj.virt_type = "lxc"
        obj.memory = 100 * units.Mi
        obj.vcpus = 2
        obj.cpuset = set([0, 1, 3, 4, 5])
        obj.name = "demo"
        obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        obj.os_type = "exe"
        obj.os_init_path = "/sbin/init"

        uidmap = config.LibvirtConfigGuestUIDMap()
        uidmap.target = "10000"
        uidmap.count = "1"
        obj.idmaps.append(uidmap)
        gidmap = config.LibvirtConfigGuestGIDMap()
        gidmap.target = "10000"
        gidmap.count = "1"
        obj.idmaps.append(gidmap)

        fs = config.LibvirtConfigGuestFilesys()
        fs.source_dir = "/root/lxc"
        fs.target_dir = "/"

        obj.add_device(fs)

        xml = obj.to_xml()
        # NOTE(review): expected-XML-first argument order here; most other
        # tests in this class pass the generated xml first.
        self.assertXmlEqual("""
            <domain type="lxc">
              <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
              <name>demo</name>
              <memory>104857600</memory>
              <vcpu cpuset="0-1,3-5">2</vcpu>
              <os>
                <type>exe</type>
                <init>/sbin/init</init>
              </os>
              <devices>
                <filesystem type="mount">
                  <source dir="/root/lxc"/>
                  <target dir="/"/>
                </filesystem>
              </devices>
              <idmap>
                <uid start="0" target="10000" count="1"/>
                <gid start="0" target="10000" count="1"/>
              </idmap>
            </domain>""", xml)

    def test_config_xen_pv(self):
        # Paravirtual Xen guest: kernel/initrd/cmdline go in <os>, and the
        # disk targets the xen bus.
        obj = config.LibvirtConfigGuest()
        obj.virt_type = "xen"
        obj.memory = 100 * units.Mi
        obj.vcpus = 2
        obj.cpuset = set([0, 1, 3, 4, 5])
        obj.name = "demo"
        obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        obj.os_type = "linux"
        obj.os_kernel = "/tmp/vmlinuz"
        obj.os_initrd = "/tmp/ramdisk"
        obj.os_cmdline = "console=xvc0"

        disk = config.LibvirtConfigGuestDisk()
        disk.source_type = "file"
        disk.source_path = "/tmp/img"
        disk.target_dev = "/dev/xvda"
        disk.target_bus = "xen"

        obj.add_device(disk)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domain type="xen">
              <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
              <name>demo</name>
              <memory>104857600</memory>
              <vcpu cpuset="0-1,3-5">2</vcpu>
              <os>
                <type>linux</type>
                <kernel>/tmp/vmlinuz</kernel>
                <initrd>/tmp/ramdisk</initrd>
                <cmdline>console=xvc0</cmdline>
              </os>
              <devices>
                <disk type="file" device="disk">
                  <source file="/tmp/img"/>
                  <target bus="xen" dev="/dev/xvda"/>
                </disk>
              </devices>
            </domain>""")

    def test_config_xen_hvm(self):
        # Fully-virtualized Xen guest: hvm loader plus pae/acpi/apic flags,
        # which are rendered as empty elements under <features>.
        obj = config.LibvirtConfigGuest()
        obj.virt_type = "xen"
        obj.memory = 100 * units.Mi
        obj.vcpus = 2
        obj.cpuset = set([0, 1, 3, 4, 5])
        obj.name = "demo"
        obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        obj.os_type = "hvm"
        obj.os_loader = '/usr/lib/xen/boot/hvmloader'
        obj.os_root = "root=xvda"
        obj.os_cmdline = "console=xvc0"
        obj.pae = True
        obj.acpi = True
        obj.apic = True

        disk = config.LibvirtConfigGuestDisk()
        disk.source_type = "file"
        disk.source_path = "/tmp/img"
        disk.target_dev = "/dev/xvda"
        disk.target_bus = "xen"

        obj.add_device(disk)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domain type="xen">
              <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
              <name>demo</name>
              <memory>104857600</memory>
              <vcpu cpuset="0-1,3-5">2</vcpu>
              <os>
                <type>hvm</type>
                <loader>/usr/lib/xen/boot/hvmloader</loader>
                <cmdline>console=xvc0</cmdline>
                <root>root=xvda</root>
              </os>
              <features>
                <acpi/>
                <apic/>
                <pae/>
              </features>
              <devices>
                <disk type="file" device="disk">
                  <source file="/tmp/img"/>
                  <target bus="xen" dev="/dev/xvda"/>
                </disk>
              </devices>
            </domain>""")

    def test_config_kvm(self):
        # "Kitchen sink" KVM guest exercising cputune, memoryBacking,
        # memtune, numatune (global memory + per-cell memnodes), smbios
        # sysinfo and boot device ordering in one document.
        obj = config.LibvirtConfigGuest()
        obj.virt_type = "kvm"
        obj.memory = 100 * units.Mi
        obj.vcpus = 2
        obj.cpuset = set([0, 1, 3, 4, 5])

        obj.cputune = config.LibvirtConfigGuestCPUTune()
        obj.cputune.shares = 100
        obj.cputune.quota = 50000
        obj.cputune.period = 25000

        obj.membacking = config.LibvirtConfigGuestMemoryBacking()
        obj.membacking.hugepages = True

        obj.memtune = config.LibvirtConfigGuestMemoryTune()
        obj.memtune.hard_limit = 496
        obj.memtune.soft_limit = 672
        obj.memtune.swap_hard_limit = 1638
        obj.memtune.min_guarantee = 2970

        obj.numatune = config.LibvirtConfigGuestNUMATune()

        numamemory = config.LibvirtConfigGuestNUMATuneMemory()
        numamemory.mode = "preferred"
        numamemory.nodeset = [0, 1, 2, 3, 8]

        obj.numatune.memory = numamemory

        numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
        numamemnode0.cellid = 0
        numamemnode0.mode = "preferred"
        numamemnode0.nodeset = [0, 1]

        numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
        numamemnode1.cellid = 1
        numamemnode1.mode = "preferred"
        numamemnode1.nodeset = [2, 3]

        numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
        numamemnode2.cellid = 2
        numamemnode2.mode = "preferred"
        numamemnode2.nodeset = [8]

        obj.numatune.memnodes.extend([numamemnode0,
                                      numamemnode1,
                                      numamemnode2])

        obj.name = "demo"
        obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        obj.os_type = "linux"
        obj.os_boot_dev = ["hd", "cdrom", "fd"]
        obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
        obj.pae = True
        obj.acpi = True
        obj.apic = True

        obj.sysinfo = config.LibvirtConfigGuestSysinfo()
        obj.sysinfo.bios_vendor = "Acme"
        obj.sysinfo.system_version = "1.0.0"

        disk = config.LibvirtConfigGuestDisk()
        disk.source_type = "file"
        disk.source_path = "/tmp/img"
        disk.target_dev = "/dev/vda"
        disk.target_bus = "virtio"

        obj.add_device(disk)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domain type="kvm">
              <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
              <name>demo</name>
              <memory>104857600</memory>
              <memoryBacking>
                <hugepages/>
              </memoryBacking>
              <memtune>
                <hard_limit units="K">496</hard_limit>
                <soft_limit units="K">672</soft_limit>
                <swap_hard_limit units="K">1638</swap_hard_limit>
                <min_guarantee units="K">2970</min_guarantee>
              </memtune>
              <numatune>
                <memory mode="preferred" nodeset="0-3,8"/>
                <memnode cellid="0" mode="preferred" nodeset="0-1"/>
                <memnode cellid="1" mode="preferred" nodeset="2-3"/>
                <memnode cellid="2" mode="preferred" nodeset="8"/>
              </numatune>
              <vcpu cpuset="0-1,3-5">2</vcpu>
              <sysinfo type='smbios'>
                 <bios>
                   <entry name="vendor">Acme</entry>
                 </bios>
                 <system>
                   <entry name="version">1.0.0</entry>
                 </system>
              </sysinfo>
              <os>
                <type>linux</type>
                <boot dev="hd"/>
                <boot dev="cdrom"/>
                <boot dev="fd"/>
                <smbios mode="sysinfo"/>
              </os>
              <features>
                <acpi/>
                <apic/>
                <pae/>
              </features>
              <cputune>
                <shares>100</shares>
                <quota>50000</quota>
                <period>25000</period>
              </cputune>
              <devices>
                <disk type="file" device="disk">
                  <source file="/tmp/img"/>
                  <target bus="virtio" dev="/dev/vda"/>
                </disk>
              </devices>
            </domain>""")

    def test_config_machine_type(self):
        # os_mach_type is rendered as the machine= attribute on <type>.
        obj = config.LibvirtConfigGuest()
        obj.virt_type = "kvm"
        obj.memory = 100 * units.Mi
        obj.vcpus = 2
        obj.name = "demo"
        obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
        obj.os_type = "hvm"
        obj.os_mach_type = "fake_machine_type"
        xml = obj.to_xml()

        self.assertXmlEqual(xml, """
            <domain type="kvm">
              <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
              <name>demo</name>
              <memory>104857600</memory>
              <vcpu>2</vcpu>
              <os>
                <type machine="fake_machine_type">hvm</type>
              </os>
            </domain>""")

    def test_ConfigGuest_parse_devices(self):
        # Parsing a domain doc recovers a typed hostdev device object.
        xmldoc = """ <domain type="kvm">
                      <devices>
                        <hostdev mode="subsystem" type="pci" managed="no">
                        </hostdev>
                      </devices>
                     </domain>
                  """
        obj = config.LibvirtConfigGuest()
        obj.parse_str(xmldoc)
        self.assertEqual(len(obj.devices), 1)
        self.assertIsInstance(obj.devices[0],
                              config.LibvirtConfigGuestHostdevPCI)
        self.assertEqual(obj.devices[0].mode, 'subsystem')
        self.assertEqual(obj.devices[0].managed, 'no')

    def test_ConfigGuest_parse_devices_wrong_type(self):
        # An unrecognized hostdev type is silently skipped by the parser.
        xmldoc = """ <domain type="kvm">
                      <devices>
                        <hostdev mode="subsystem" type="xxxx" managed="no">
                        </hostdev>
                      </devices>
                     </domain>
                  """
        obj = config.LibvirtConfigGuest()
        obj.parse_str(xmldoc)
        self.assertEqual(len(obj.devices), 0)

    # NOTE(review): "parese" looks like a typo for "parse" in the method
    # name; kept as-is to avoid changing the test's discovered name.
    def test_ConfigGuest_parese_cpu(self):
        # Parsing recovers the <cpu> mode/match attributes and model.
        xmldoc = """ <domain>
                      <cpu mode='custom' match='exact'>
                        <model>kvm64</model>
                      </cpu>
                    </domain>
                  """
        obj = config.LibvirtConfigGuest()
        obj.parse_str(xmldoc)

        self.assertEqual(obj.cpu.mode, 'custom')
        self.assertEqual(obj.cpu.match, 'exact')
        self.assertEqual(obj.cpu.model, 'kvm64')
+
class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
    """Tests for the <domainsnapshot> document builder."""

    def test_config_snapshot(self):
        # With no disks added, an empty <disks/> element is still emitted.
        obj = config.LibvirtConfigGuestSnapshot()
        obj.name = "Demo"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domainsnapshot>
              <name>Demo</name>
              <disks/>
            </domainsnapshot>""")

    def test_config_snapshot_with_disks(self):
        # An external file-backed snapshot for vda; vdb opts out with
        # snapshot='no' and gets no <source> child.
        obj = config.LibvirtConfigGuestSnapshot()
        obj.name = "Demo"

        disk = config.LibvirtConfigGuestSnapshotDisk()
        disk.name = 'vda'
        disk.source_path = 'source-path'
        disk.source_type = 'file'
        disk.snapshot = 'external'
        disk.driver_name = 'qcow2'
        obj.add_disk(disk)

        disk2 = config.LibvirtConfigGuestSnapshotDisk()
        disk2.name = 'vdb'
        disk2.snapshot = 'no'
        obj.add_disk(disk2)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domainsnapshot>
              <name>Demo</name>
              <disks>
                <disk name='vda' snapshot='external' type='file'>
                  <source file='source-path'/>
                </disk>
                <disk name='vdb' snapshot='no'/>
              </disks>
            </domainsnapshot>""")

    def test_config_snapshot_with_network_disks(self):
        # A network-backed (glusterfs) snapshot disk: protocol/name go on
        # <source>, and each host/port pair becomes a <host> child.
        obj = config.LibvirtConfigGuestSnapshot()
        obj.name = "Demo"

        disk = config.LibvirtConfigGuestSnapshotDisk()
        disk.name = 'vda'
        disk.source_name = 'source-file'
        disk.source_type = 'network'
        disk.source_hosts = ['host1']
        disk.source_ports = ['12345']
        disk.source_protocol = 'glusterfs'
        disk.snapshot = 'external'
        disk.driver_name = 'qcow2'
        obj.add_disk(disk)

        disk2 = config.LibvirtConfigGuestSnapshotDisk()
        disk2.name = 'vdb'
        disk2.snapshot = 'no'
        obj.add_disk(disk2)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domainsnapshot>
              <name>Demo</name>
              <disks>
                <disk name='vda' snapshot='external' type='network'>
                  <source protocol='glusterfs' name='source-file'>
                    <host name='host1' port='12345'/>
                  </source>
                </disk>
                <disk name='vdb' snapshot='no'/>
              </disks>
            </domainsnapshot>""")
+
+
class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
    """Tests for parsing libvirt node-device XML descriptions,
    focusing on how PCI capabilities (and their virt_functions /
    phys_function sub-capabilities) are extracted.
    """

    def test_config_virt_usb_device(self):
        # A non-PCI (usb) device: no pci_capability should be populated.
        xmlin = """
        <device>
        <name>usb_0000_09_00_0</name>
        <parent>pci_0000_00_1c_0</parent>
        <driver>
        <name>vxge</name>
        </driver>
        <capability type="usb">
          <domain>0</domain>
          <capability type="fake_usb">
            <address fake_usb="fake"/>
          </capability>
        </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsNone(obj.pci_capability)

    def test_config_virt_device(self):
        # An SR-IOV VF-capable PCI device: the virt_functions
        # sub-capability and its three addresses are parsed.
        xmlin = """
        <device>
        <name>pci_0000_09_00_0</name>
        <parent>pci_0000_00_1c_0</parent>
        <driver>
        <name>vxge</name>
        </driver>
        <capability type="pci">
          <domain>0</domain>
          <bus>9</bus>
          <slot>0</slot>
          <function>0</function>
        <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
        <vendor id="0x17d5">Neterion Inc.</vendor>
        <capability type="virt_functions">
        <address domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
        <address domain="0x0000" bus="0x0a" slot="0x00" function="0x2"/>
        <address domain="0x0000" bus="0x0a" slot="0x00" function="0x3"/>
        </capability>
        </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsInstance(obj.pci_capability,
                              config.LibvirtConfigNodeDevicePciCap)
        self.assertIsInstance(obj.pci_capability.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)
        self.assertEqual(obj.pci_capability.fun_capability[0].type,
                         "virt_functions")
        self.assertEqual(len(obj.pci_capability.fun_capability[0].
                             device_addrs),
                         3)
        self.assertEqual(obj.pci_capability.bus, 9)

    def test_config_phy_device(self):
        # A VF device pointing back at its physical function.
        xmlin = """
        <device>
        <name>pci_0000_33_00_0</name>
        <parent>pci_0000_22_1c_0</parent>
        <driver>
        <name>vxx</name>
        </driver>
        <capability type="pci">
          <domain>0</domain>
          <bus>9</bus>
          <slot>0</slot>
          <function>0</function>
        <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
        <vendor id="0x17d5">Neterion Inc.</vendor>
        <capability type="phys_function">
        <address domain='0x0000' bus='0x09' slot='0x00' function='0x0'/>
        </capability>
        </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsInstance(obj.pci_capability,
                              config.LibvirtConfigNodeDevicePciCap)
        self.assertIsInstance(obj.pci_capability.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)
        self.assertEqual(obj.pci_capability.fun_capability[0].type,
                         "phys_function")
        self.assertEqual(len(obj.pci_capability.fun_capability[0].
                             device_addrs),
                         1)

    def test_config_non_device(self):
        # A self-closed sub-capability element with no addresses still
        # yields a typed (but empty) sub-function capability.
        xmlin = """
        <device>
        <name>pci_0000_33_00_0</name>
        <parent>pci_0000_22_1c_0</parent>
        <driver>
        <name>vxx</name>
        </driver>
        <capability type="pci">
          <domain>0</domain>
          <bus>9</bus>
          <slot>0</slot>
          <function>0</function>
        <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
        <vendor id="0x17d5">Neterion Inc.</vendor>
        <capability type="virt_functions"/>
        </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsInstance(obj.pci_capability,
                              config.LibvirtConfigNodeDevicePciCap)
        self.assertIsInstance(obj.pci_capability.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)
        self.assertEqual(obj.pci_capability.fun_capability[0].type,
                         "virt_functions")

    def test_config_fail_device(self):
        # Same as above but with an open/close element pair instead of a
        # self-closed tag; parsing must behave the same.
        xmlin = """
        <device>
        <name>pci_0000_33_00_0</name>
        <parent>pci_0000_22_1c_0</parent>
        <driver>
        <name>vxx</name>
        </driver>
        <capability type="pci">
          <domain>0</domain>
          <bus>9</bus>
          <slot>0</slot>
          <function>0</function>
        <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
        <vendor id="0x17d5">Neterion Inc.</vendor>
        <capability type="virt_functions">
        </capability>
        </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsInstance(obj.pci_capability,
                              config.LibvirtConfigNodeDevicePciCap)
        self.assertIsInstance(obj.pci_capability.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)
        self.assertEqual(obj.pci_capability.fun_capability[0].type,
                         "virt_functions")

    def test_config_2cap_device(self):
        # Both phys_function and virt_functions present: order of the
        # parsed fun_capability list follows document order.
        xmlin = """
        <device>
        <name>pci_0000_04_10_7</name>
        <parent>pci_0000_00_01_1</parent>
        <driver>
        <name>igbvf</name>
        </driver>
        <capability type='pci'>
          <domain>0</domain>
          <bus>4</bus>
          <slot>16</slot>
          <function>7</function>
          <product id='0x1520'>I350 Ethernet Controller Virtual</product>
          <vendor id='0x8086'>Intel Corporation</vendor>
          <capability type='phys_function'>
       <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
          </capability>
          <capability type='virt_functions'>
       <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
          </capability>
        </capability>
      </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsInstance(obj.pci_capability,
                              config.LibvirtConfigNodeDevicePciCap)
        self.assertIsInstance(obj.pci_capability.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)
        self.assertEqual(obj.pci_capability.fun_capability[0].type,
                         "phys_function")
        self.assertEqual(obj.pci_capability.fun_capability[1].type,
                         "virt_functions")
+
+
class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
    """Tests for parsing a standalone PCI <capability> element."""

    def test_config_device_pci_cap(self):
        # Numeric children and hex id attributes are converted to ints;
        # sub-function addresses become (domain, bus, slot, function)
        # integer tuples.
        xmlin = """
        <capability type="pci">
          <domain>0</domain>
          <bus>10</bus>
          <slot>1</slot>
          <function>5</function>
          <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
          <vendor id="0x8086">Intel Inc.</vendor>
          <capability type="virt_functions">
           <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
           <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
          </capability>
        </capability>"""
        obj = config.LibvirtConfigNodeDevicePciCap()
        obj.parse_str(xmlin)

        self.assertEqual(obj.domain, 0)
        self.assertEqual(obj.bus, 10)
        self.assertEqual(obj.slot, 1)
        self.assertEqual(obj.function, 5)
        self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
        self.assertEqual(obj.product_id, 0x10bd)
        self.assertEqual(obj.vendor, "Intel Inc.")
        self.assertEqual(obj.vendor_id, 0x8086)
        self.assertIsInstance(obj.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)

        self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
        self.assertEqual(obj.fun_capability[0].device_addrs,
                         [(0, 10, 1, 1),
                          (1, 10, 2, 3), ])

    def test_config_device_pci_2cap(self):
        # Two sub-capabilities in one element: both are retained in
        # document order with their own address lists.
        xmlin = """
        <capability type="pci">
          <domain>0</domain>
          <bus>10</bus>
          <slot>1</slot>
          <function>5</function>
          <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
          <vendor id="0x8086">Intel Inc.</vendor>
          <capability type="virt_functions">
           <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
           <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
          </capability>
          <capability type="phys_function">
           <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
          </capability>
        </capability>"""
        obj = config.LibvirtConfigNodeDevicePciCap()
        obj.parse_str(xmlin)

        self.assertEqual(obj.domain, 0)
        self.assertEqual(obj.bus, 10)
        self.assertEqual(obj.slot, 1)
        self.assertEqual(obj.function, 5)
        self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
        self.assertEqual(obj.product_id, 0x10bd)
        self.assertEqual(obj.vendor, "Intel Inc.")
        self.assertEqual(obj.vendor_id, 0x8086)
        self.assertIsInstance(obj.fun_capability[0],
                              config.LibvirtConfigNodeDevicePciSubFunctionCap)

        self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
        self.assertEqual(obj.fun_capability[0].device_addrs,
                         [(0, 10, 1, 1),
                          (1, 10, 2, 3), ])
        self.assertEqual(obj.fun_capability[1].type, 'phys_function')
        self.assertEqual(obj.fun_capability[1].device_addrs,
                         [(0, 10, 1, 1), ])

    # NOTE(review): this test exercises LibvirtConfigGuestDisk, not the
    # PCI capability parser -- it appears misplaced in this test class.
    def test_config_read_only_disk(self):
        # The readonly flag toggles the presence of the <readonly/> element.
        obj = config.LibvirtConfigGuestDisk()
        obj.source_type = "disk"
        obj.source_device = "disk"
        obj.driver_name = "kvm"
        obj.target_dev = "/dev/hdc"
        obj.target_bus = "virtio"
        obj.readonly = True

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="disk" device="disk">
              <driver name="kvm"/>
              <target bus="virtio" dev="/dev/hdc"/>
              <readonly/>
            </disk>""")

        obj.readonly = False
        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <disk type="disk" device="disk">
              <driver name="kvm"/>
              <target bus="virtio" dev="/dev/hdc"/>
            </disk>""")
+
+
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
    """Parsing tests for a PCI sub-function <capability> element."""

    def test_config_device_pci_subfunction(self):
        # Hex address attributes are parsed into integer
        # (domain, bus, slot, function) tuples.
        xmlin = """
        <capability type="virt_functions">
          <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
          <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
        </capability>"""
        cap = config.LibvirtConfigNodeDevicePciSubFunctionCap()
        cap.parse_str(xmlin)

        self.assertEqual('virt_functions', cap.type)
        expected_addrs = [(0, 10, 1, 1),
                          (1, 10, 2, 3)]
        self.assertEqual(expected_addrs, cap.device_addrs)
+
+
class LibvirtConfigGuestVideoTest(LibvirtConfigBaseTest):
    """Tests for the guest <video> device config."""

    def test_config_video_driver(self):
        # Only the model type is emitted when vram/heads are unset.
        video = config.LibvirtConfigGuestVideo()
        video.type = 'qxl'

        self.assertXmlEqual(video.to_xml(), """
            <video>
                <model type='qxl'/>
            </video>""")

    def test_config_video_driver_vram_heads(self):
        # vram and heads become attributes of the <model> element.
        video = config.LibvirtConfigGuestVideo()
        video.type = 'qxl'
        video.vram = '9216'
        video.heads = '1'

        self.assertXmlEqual(video.to_xml(), """
            <video>
                <model type='qxl' vram='9216' heads='1'/>
            </video>""")
+
+
class LibvirtConfigGuestSeclabel(LibvirtConfigBaseTest):
    """Tests for the <seclabel> security label config."""

    def test_config_seclabel_config(self):
        # Defaults produce a bare dynamic seclabel.
        seclabel = config.LibvirtConfigSeclabel()

        self.assertXmlEqual(seclabel.to_xml(), """
            <seclabel type='dynamic'/>""")

    def test_config_seclabel_baselabel(self):
        # A baselabel is emitted as a child element.
        seclabel = config.LibvirtConfigSeclabel()
        seclabel.type = 'dynamic'
        seclabel.baselabel = 'system_u:system_r:my_svirt_t:s0'

        self.assertXmlEqual(seclabel.to_xml(), """
            <seclabel type='dynamic'>
              <baselabel>system_u:system_r:my_svirt_t:s0</baselabel>
            </seclabel>""")
+
+
class LibvirtConfigGuestRngTest(LibvirtConfigBaseTest):
    """Tests for the guest <rng> device config."""

    def test_config_rng_driver(self):
        # Defaults: virtio model with a 'random' backend and no rate.
        rng = config.LibvirtConfigGuestRng()

        self.assertXmlEqual(rng.to_xml(), """
<rng model='virtio'>
    <backend model='random'/>
</rng>""")

    def test_config_rng_driver_with_rate(self):
        # A backend path plus rate-limiting options.
        rng = config.LibvirtConfigGuestRng()
        rng.backend = '/dev/random'
        rng.rate_period = '12'
        rng.rate_bytes = '34'

        self.assertXmlEqual(rng.to_xml(), """
<rng model='virtio'>
    <rate period='12' bytes='34'/>
    <backend model='random'>/dev/random</backend>
</rng>""")
+
+
class LibvirtConfigGuestControllerTest(LibvirtConfigBaseTest):
    """Tests for the guest <controller> device config."""

    # NOTE(review): "contoller" looks like a typo for "controller" in the
    # method name; kept as-is to preserve the discovered test name.
    def test_config_guest_contoller(self):
        ctrl = config.LibvirtConfigGuestController()
        ctrl.type = 'scsi'
        ctrl.index = 0
        ctrl.model = 'virtio-scsi'

        self.assertXmlEqual(ctrl.to_xml(), """
            <controller type='scsi' index='0' model='virtio-scsi'/>""")
+
+
class LibvirtConfigGuestWatchdogTest(LibvirtConfigBaseTest):
    """Tests for the guest <watchdog> device config."""

    def test_config_watchdog(self):
        # An explicit action overrides the default.
        watchdog = config.LibvirtConfigGuestWatchdog()
        watchdog.action = 'none'

        self.assertXmlEqual(watchdog.to_xml(),
                            "<watchdog model='i6300esb' action='none'/>")

    def test_config_watchdog_default_action(self):
        # Without an explicit action the device defaults to 'reset'.
        watchdog = config.LibvirtConfigGuestWatchdog()

        self.assertXmlEqual(watchdog.to_xml(),
                            "<watchdog model='i6300esb' action='reset'/>")
+
+
class LibvirtConfigGuestCPUTuneTest(LibvirtConfigBaseTest):
    """Tests for the guest <cputune> element config."""

    def test_config_cputune_timeslice(self):
        # shares/quota/period each become a child element.
        cputune = config.LibvirtConfigGuestCPUTune()
        cputune.shares = 100
        cputune.quota = 50000
        cputune.period = 25000

        self.assertXmlEqual(cputune.to_xml(), """
            <cputune>
              <shares>100</shares>
              <quota>50000</quota>
              <period>25000</period>
            </cputune>""")

    def test_config_cputune_vcpus(self):
        # Each vcpupin entry pins one vcpu to a set of host cpus,
        # rendered as a collapsed cpuset range.
        cputune = config.LibvirtConfigGuestCPUTune()

        for vcpu_id, host_cpus in enumerate([(0, 1), (2, 3),
                                             (4, 5), (6, 7)]):
            pin = config.LibvirtConfigGuestCPUTuneVCPUPin()
            pin.id = vcpu_id
            pin.cpuset = set(host_cpus)
            cputune.vcpupin.append(pin)

        self.assertXmlEqual(cputune.to_xml(), """
            <cputune>
              <vcpupin vcpu="0" cpuset="0-1"/>
              <vcpupin vcpu="1" cpuset="2-3"/>
              <vcpupin vcpu="2" cpuset="4-5"/>
              <vcpupin vcpu="3" cpuset="6-7"/>
            </cputune>""")
+
+
class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest):
    """Tests for the guest <memoryBacking> element config."""

    def test_config_memory_backing_none(self):
        # A default object serializes to an empty element.
        backing = config.LibvirtConfigGuestMemoryBacking()

        self.assertXmlEqual(backing.to_xml(), "<memoryBacking/>")

    def test_config_memory_backing_all(self):
        # sharedpages=False is rendered as <nosharedpages/>.
        backing = config.LibvirtConfigGuestMemoryBacking()
        backing.hugepages = True
        backing.sharedpages = False
        backing.locked = True

        self.assertXmlEqual(backing.to_xml(), """
            <memoryBacking>
              <hugepages/>
              <nosharedpages/>
              <locked/>
            </memoryBacking>""")
+
+
class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest):
    """Tests for the guest <memtune> element config.

    The test methods were previously named test_config_memory_backing_*,
    copy-pasted from the memoryBacking test class above; they are renamed
    here to describe the memtune element they actually exercise.
    """

    def test_config_memory_tune_none(self):
        # With no limits set, an empty <memtune/> element is produced.
        obj = config.LibvirtConfigGuestMemoryTune()

        xml = obj.to_xml()
        self.assertXmlEqual(xml, "<memtune/>")

    def test_config_memory_tune_all(self):
        # Each limit is emitted as a child element with a units="K"
        # attribute, in a fixed order independent of assignment order.
        obj = config.LibvirtConfigGuestMemoryTune()
        obj.soft_limit = 6
        obj.hard_limit = 28
        obj.swap_hard_limit = 140
        obj.min_guarantee = 270

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <memtune>
              <hard_limit units="K">28</hard_limit>
              <soft_limit units="K">6</soft_limit>
              <swap_hard_limit units="K">140</swap_hard_limit>
              <min_guarantee units="K">270</min_guarantee>
            </memtune>""")
+
+
class LibvirtConfigGuestNUMATuneTest(LibvirtConfigBaseTest):
    """Tests for the guest <numatune> element config."""

    def test_config_numa_tune_none(self):
        # No memory/memnode settings -> an empty element.
        numatune = config.LibvirtConfigGuestNUMATune()

        self.assertXmlEqual("<numatune/>", numatune.to_xml())

    def test_config_numa_tune_memory(self):
        # The nodeset list is collapsed into a range string and the
        # mode defaults to "strict".
        numatune = config.LibvirtConfigGuestNUMATune()

        memory = config.LibvirtConfigGuestNUMATuneMemory()
        memory.nodeset = [0, 1, 2, 3, 8]
        numatune.memory = memory

        self.assertXmlEqual("""
            <numatune>
              <memory mode="strict" nodeset="0-3,8"/>
            </numatune>""", numatune.to_xml())

    def test_config_numa_tune_memnodes(self):
        # One <memnode> per cell, each carrying its own nodeset.
        numatune = config.LibvirtConfigGuestNUMATune()

        for cellid, nodeset in ((0, [0, 1]), (1, [2, 3]), (2, [8])):
            memnode = config.LibvirtConfigGuestNUMATuneMemNode()
            memnode.cellid = cellid
            memnode.nodeset = nodeset
            numatune.memnodes.append(memnode)

        self.assertXmlEqual("""
            <numatune>
              <memnode cellid="0" mode="strict" nodeset="0-1"/>
              <memnode cellid="1" mode="strict" nodeset="2-3"/>
              <memnode cellid="2" mode="strict" nodeset="8"/>
            </numatune>""", numatune.to_xml())
+
+
class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest):
    """Tests for the Nova-specific guest metadata block rendered in the
    nova: XML namespace.
    """

    def test_config_metadata(self):
        meta = config.LibvirtConfigGuestMetaNovaInstance()
        meta.package = "2014.2.3"
        meta.name = "moonbuggy"
        # Unix timestamp; rendered below as "2009-02-13 23:31:30"
        # (presumably formatted in UTC -- the expected XML implies that).
        meta.creationTime = 1234567890
        meta.roottype = "image"
        meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426"

        owner = config.LibvirtConfigGuestMetaNovaOwner()
        owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670"
        owner.username = "buzz"
        owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021"
        owner.projectname = "moonshot"

        meta.owner = owner

        flavor = config.LibvirtConfigGuestMetaNovaFlavor()
        flavor.name = "m1.lowgravity"
        flavor.vcpus = 8
        flavor.memory = 2048
        flavor.swap = 10
        flavor.disk = 50
        flavor.ephemeral = 10

        meta.flavor = flavor

        xml = meta.to_xml()
        self.assertXmlEqual(xml, """
    <nova:instance xmlns:nova='http://openstack.org/xmlns/libvirt/nova/1.0'>
      <nova:package version="2014.2.3"/>
      <nova:name>moonbuggy</nova:name>
      <nova:creationTime>2009-02-13 23:31:30</nova:creationTime>
      <nova:flavor name="m1.lowgravity">
        <nova:memory>2048</nova:memory>
        <nova:disk>50</nova:disk>
        <nova:swap>10</nova:swap>
        <nova:ephemeral>10</nova:ephemeral>
        <nova:vcpus>8</nova:vcpus>
      </nova:flavor>
      <nova:owner>
        <nova:user
         uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user>
        <nova:project
         uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project>
      </nova:owner>
      <nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/>
    </nova:instance>
    """)
+
+
class LibvirtConfigGuestIDMap(LibvirtConfigBaseTest):
    """Tests for uid/gid map serialization and parsing."""

    def test_config_id_map_parse_start_not_int(self):
        # A non-numeric 'start' attribute must be rejected.
        idmap = config.LibvirtConfigGuestIDMap()

        self.assertRaises(ValueError, idmap.parse_str,
                          "<uid start='a' target='20000' count='5'/>")

    def test_config_id_map_parse_target_not_int(self):
        # A non-numeric 'target' attribute must be rejected.
        idmap = config.LibvirtConfigGuestIDMap()

        self.assertRaises(ValueError, idmap.parse_str,
                          "<uid start='2' target='a' count='5'/>")

    def test_config_id_map_parse_count_not_int(self):
        # A non-numeric 'count' attribute must be rejected.
        idmap = config.LibvirtConfigGuestIDMap()

        self.assertRaises(ValueError, idmap.parse_str,
                          "<uid start='2' target='20000' count='a'/>")

    def test_config_uid_map(self):
        # Serialization round: all three attributes appear on <uid>.
        uidmap = config.LibvirtConfigGuestUIDMap()
        uidmap.start = 1
        uidmap.target = 10000
        uidmap.count = 2

        self.assertXmlEqual("<uid start='1' target='10000' count='2'/>",
                            uidmap.to_xml())

    def test_config_uid_map_parse(self):
        # Parsing converts the attributes to integers.
        uidmap = config.LibvirtConfigGuestUIDMap()
        uidmap.parse_str("<uid start='2' target='20000' count='5'/>")

        self.assertEqual(2, uidmap.start)
        self.assertEqual(20000, uidmap.target)
        self.assertEqual(5, uidmap.count)

    def test_config_gid_map(self):
        # Same as the uid case, but for the <gid> element.
        gidmap = config.LibvirtConfigGuestGIDMap()
        gidmap.start = 1
        gidmap.target = 10000
        gidmap.count = 2

        self.assertXmlEqual("<gid start='1' target='10000' count='2'/>",
                            gidmap.to_xml())

    def test_config_gid_map_parse(self):
        # Parsing converts the attributes to integers.
        gidmap = config.LibvirtConfigGuestGIDMap()
        gidmap.parse_str("<gid start='2' target='20000' count='5'/>")

        self.assertEqual(2, gidmap.start)
        self.assertEqual(20000, gidmap.target)
        self.assertEqual(5, gidmap.count)
+
+
class LibvirtConfigMemoryBalloonTest(LibvirtConfigBaseTest):
    """Tests for the <memballoon> device config."""

    def test_config_memory_balloon_period(self):
        # The stats collection period is emitted as a <stats> child.
        balloon = config.LibvirtConfigMemoryBalloon()
        balloon.model = 'fake_virtio'
        balloon.period = 11

        expected_xml = """
            <memballoon model='fake_virtio'>
    <stats period='11'/>
            </memballoon>"""

        self.assertXmlEqual(expected_xml, balloon.to_xml())
diff --git a/nova/tests/unit/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py
new file mode 100644
index 0000000000..649144c0d1
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_designer.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt.libvirt import designer
+
+
class DesignerTestCase(test.NoDBTestCase):
    """Tests for nova.virt.libvirt.designer helpers."""

    def test_set_vif_bandwidth_config_no_extra_specs(self):
        # Test that set_vif_bandwidth_config does not raise a KeyError
        # when its second parameter has no 'extra_specs' field.

        try:
            # The conf argument is never used here, so 'None' is fine.
            # An empty dictionary is fine: all that matters is that there
            # is no 'extra_specs' field.
            designer.set_vif_bandwidth_config(None, {})
        except KeyError as e:
            self.fail('KeyError: %s' % e)
diff --git a/nova/tests/unit/virt/libvirt/test_dmcrypt.py b/nova/tests/unit/virt/libvirt/test_dmcrypt.py
new file mode 100644
index 0000000000..02efbe10b5
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_dmcrypt.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import test
+from nova import utils
+from nova.virt.libvirt import dmcrypt
+
+
class LibvirtDmcryptTestCase(test.NoDBTestCase):
    """Tests for nova.virt.libvirt.dmcrypt, capturing executed commands."""

    def setUp(self):
        super(LibvirtDmcryptTestCase, self).setUp()

        self.CIPHER = 'cipher'
        self.KEY_SIZE = 256
        self.NAME = 'disk'
        # volume_name() derives the dm-crypt target name from the disk name.
        self.TARGET = dmcrypt.volume_name(self.NAME)
        self.PATH = '/dev/nova-lvm/instance_disk'
        self.KEY = range(0, self.KEY_SIZE)
        # Hex encoding of KEY as dmcrypt feeds it to cryptsetup on stdin.
        self.KEY_STR = ''.join(["%02x" % x for x in range(0, self.KEY_SIZE)])

        # Record every utils.execute() invocation instead of running it.
        self.executes = []
        self.kwargs = {}

        def fake_execute(*cmd, **kwargs):
            self.executes.append(cmd)
            self.kwargs = kwargs
            return None, None

        def fake_listdir(path):
            # One entry matching the dm-crypt naming scheme and one that
            # doesn't; list_volumes() should only report the former.
            return [self.TARGET, '/dev/mapper/disk']

        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os, 'listdir', fake_listdir)

    def test_create_volume(self):
        # create_volume should run cryptsetup with the key on stdin.
        expected_commands = [('cryptsetup',
                              'create',
                              self.TARGET,
                              self.PATH,
                              '--cipher=' + self.CIPHER,
                              '--key-size=' + str(self.KEY_SIZE),
                              '--key-file=-')]
        dmcrypt.create_volume(self.TARGET, self.PATH, self.CIPHER,
                              self.KEY_SIZE, self.KEY)

        self.assertEqual(expected_commands, self.executes)
        self.assertEqual(self.KEY_STR, self.kwargs['process_input'])

    def test_delete_volume(self):
        # delete_volume should remove the dm-crypt mapping.
        expected_commands = [('cryptsetup', 'remove', self.TARGET)]
        dmcrypt.delete_volume(self.TARGET)

        self.assertEqual(expected_commands, self.executes)

    def test_list_volumes(self):
        # Only entries matching the dm-crypt volume prefix are returned.
        encrypted_volumes = dmcrypt.list_volumes()

        self.assertEqual([self.TARGET], encrypted_volumes)
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
new file mode 100644
index 0000000000..90e25e1b3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -0,0 +1,12576 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import contextlib
+import copy
+import datetime
+import errno
+import os
+import random
+import re
+import shutil
+import threading
+import time
+import uuid
+
+import eventlet
+from eventlet import greenthread
+import fixtures
+from lxml import etree
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import encodeutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.api.metadata import base as instance_metadata
+from nova.compute import arch
+from nova.compute import manager
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_mode
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.openstack.common import fileutils
+from nova.openstack.common import loopingcall
+from nova.openstack.common import uuidutils
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_pci_device
+from nova.tests.unit.virt.libvirt import fake_imagebackend
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova import utils
+from nova import version
+from nova.virt import block_device as driver_block_device
+from nova.virt import configdrive
+from nova.virt.disk import api as disk
+from nova.virt import driver
+from nova.virt import event as virtevent
+from nova.virt import fake
+from nova.virt import firewall as base_firewall
+from nova.virt import hardware
+from nova.virt import images
+from nova.virt.libvirt import blockinfo
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import driver as libvirt_driver
+from nova.virt.libvirt import firewall
+from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
+from nova.virt.libvirt import utils as libvirt_utils
+
+try:
+ import libvirt
+except ImportError:
+ libvirt = fakelibvirt
+libvirt_driver.libvirt = libvirt
+
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
+_fake_network_info = fake_network.fake_get_instance_nw_info
+
+_fake_NodeDevXml = \
+ {"pci_0000_04_00_3": """
+ <device>
+ <name>pci_0000_04_00_3</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igb</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x1521'>I350 Gigabit Network Connection</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='virt_functions'>
+ <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
+ <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
+ <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
+ <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
+ </capability>
+ </capability>
+ </device>""",
+ "pci_0000_04_10_7": """
+ <device>
+ <name>pci_0000_04_10_7</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igbvf</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>16</slot>
+ <function>7</function>
+ <product id='0x1520'>I350 Ethernet Controller Virtual Function</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ <capability type='virt_functions'>
+ </capability>
+ </capability>
+ </device>"""}
+
+
+def _concurrency(signal, wait, done, target, is_block_dev=False):
+ signal.send()
+ wait.wait()
+ done.send()
+
+
class FakeVirDomainSnapshot(object):
    """Minimal stand-in for a libvirt domain snapshot handle."""

    def __init__(self, dom=None):
        self.dom = dom

    def delete(self, flags):
        """No-op: a real snapshot would be deleted through libvirt."""
        pass
+
+
class FakeVirtDomain(object):
    """Test double for a libvirt virDomain handle.

    Holds identity (uuid/id/name), canned info and XML; every lifecycle,
    device and block-job operation is accepted and ignored.
    """

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        self.uuidstr = str(uuid.uuid4()) if uuidstr is None else uuidstr
        self.id = id
        self.domname = name
        # Shaped like virDomainGetInfo(): [state, maxMem, memory, ...];
        # the trailing entries are unused by these tests.
        self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
                      None, None]
        self._fake_dom_xml = fake_xml or """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def name(self):
        if self.domname is None:
            return "fake-domain %s" % self
        return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def UUIDString(self):
        return self.uuidstr

    def XMLDesc(self, *args):
        return self._fake_dom_xml

    # All remaining virDomain operations are no-ops.

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass
+
+
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Verify imagebackend cache locking: fetches for the same filename
    are serialized while fetches for different filenames run concurrently.
    """

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Pretend only the image-cache basedir and the lock dir exist,
            # so the cache code takes the "already cached" fast path.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            # Suppress all external commands.
            pass

        def fake_extend(image, size, use_cow=False):
            # Disk extension is irrelevant to the locking behaviour.
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def test_same_fname_concurrency(self):
        # Ensures that two caches of the same fname run sequentially.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must be blocked on the lock held by thread 1.
            self.assertFalse(done2.ready())
        finally:
            # Release thread 1 so thread 2 can proceed.
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image({'name': 'instance',
                                             'uuid': uuid},
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        # Thread 2 holds a different lock, so it should finish even though
        # thread 1 is still blocked; poll a bounded number of times.
        tries = 0
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
+
+
class FakeVolumeDriver(object):
    """Volume driver double whose config points at a fake network disk."""

    def __init__(self, *args, **kwargs):
        pass

    def attach_volume(self, *args):
        pass

    def detach_volume(self, *args):
        pass

    def get_xml(self, *args):
        return ""

    def get_config(self, *args):
        """Connect the volume to a fake device."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        for attr in ("source_protocol", "source_name",
                     "target_dev", "target_bus"):
            setattr(conf, attr, "fake")
        return conf

    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        return self.get_config()
+
+
class FakeConfigGuestDisk(object):
    """Guest-disk config double exposing only the cache-related fields."""

    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None
+
+
class FakeConfigGuest(object):
    """Guest config double exposing only the driver_cache field."""

    def __init__(self, *args, **kwargs):
        self.driver_cache = None
+
+
class FakeNodeDevice(object):
    """Node-device double returning canned XML from XMLDesc()."""

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, *args):
        return self.xml
+
+
+class LibvirtConnTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
    def setUp(self):
        """Wire up fakes so LibvirtDriver never touches a real libvirt."""
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        # Force libvirt to return a host UUID that matches the serial in
        # nova.tests.unit.fakelibvirt. This is necessary because the host UUID
        # returned by libvirt becomes the serial whose value is checked for in
        # test_xml_and_uri_* below.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.LibvirtDriver._get_host_uuid',
            lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
        # Prevent test suite trying to find /etc/machine-id
        # which isn't guaranteed to exist. Instead it will use
        # the host UUID from libvirt which we mock above
        self.flags(sysinfo_serial="hardware", group="libvirt")

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        def fake_extend(image, size, use_cow=False):
            # Disk resizing is a no-op in unit tests.
            pass

        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)

        # Bypass disk-format probing; just use the declared format.
        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)

        class FakeConn():
            # Minimal libvirt virConnect double: only the calls the driver
            # makes during these tests are implemented, with canned results.
            def baselineCPU(self, cpu, flag):
                """Add new libvirt API."""
                return """<cpu mode='custom' match='exact'>
                            <model fallback='allow'>Westmere</model>
                            <vendor>Intel</vendor>
                            <feature policy='require' name='aes'/>
                            <feature policy='require' name='hypervisor'/>
                          </cpu>"""

            def getCapabilities(self):
                """Ensure standard capabilities being returned."""
                return """<capabilities>
                            <host><cpu><arch>x86_64</arch>
                            <feature policy='require' name='hypervisor'/>
                            </cpu></host>
                          </capabilities>"""

            def getVersion(self):
                # Hypervisor version.
                return 1005001

            def getLibVersion(self):
                # libvirt version 0.9.11 encoded as an integer.
                return (0 * 1000 * 1000) + (9 * 1000) + 11

            def domainEventRegisterAny(self, *args, **kwargs):
                pass

            def registerCloseCallback(self, cb, opaque):
                pass

            def nwfilterDefineXML(self, *args, **kwargs):
                pass

            def nodeDeviceLookupByName(self, x):
                pass

            def listDevices(self, cap, flags):
                return []

            def lookupByName(self, name):
                pass

            def getHostname(self):
                return "mustard"

            def getType(self):
                return "QEMU"

            def numOfDomains(self):
                return 0

            def listDomainsID(self):
                return []

            def listDefinedDomains(self):
                return []

            def getInfo(self):
                # Shaped like virNodeGetInfo: [arch, memory, cpus, mhz, ...].
                return [arch.X86_64, 123456, 2, 2000,
                        2, 1, 1, 1]

        self.conn = FakeConn()
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_connect',
                       lambda *a, **k: self.conn)

        # Flavor data as stored in instance system_metadata.
        sys_meta = {
            'instance_type_memory_mb': 2048,
            'instance_type_swap': 0,
            'instance_type_vcpu_weight': None,
            'instance_type_root_gb': 1,
            'instance_type_id': 2,
            'instance_type_name': u'm1.small',
            'instance_type_ephemeral_gb': 0,
            'instance_type_rxtx_factor': 1.0,
            'instance_type_flavorid': u'1',
            'instance_type_vcpus': 1
        }

        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self.stubs)
        # Template instance dict used by the individual tests.
        self.test_instance = {
            'id': 1,
            'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
            'memory_kb': '1024000',
            'basepath': '/some/path',
            'bridge_name': 'br100',
            'display_name': "Acme webserver",
            'vcpus': 2,
            'project_id': 'fake',
            'bridge': 'br101',
            'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
            'root_gb': 10,
            'ephemeral_gb': 20,
            'instance_type_id': '5',  # m1.small
            'extra_specs': {},
            'system_metadata': sys_meta,
            'pci_devices': objects.PciDeviceList(),
            'numa_topology': None,
            'config_drive': None,
            'vm_mode': None,
            'kernel_id': None,
            'ramdisk_id': None,
            'os_type': 'linux',
            'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
            'ephemeral_key_uuid': None,
        }
+
    def relpath(self, path):
        """Return *path* relative to the configured instances_path."""
        return os.path.relpath(path, CONF.instances_path)
+
    def tearDown(self):
        # Reset the fake image service's module-level state between tests.
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
+
    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver(libvirt is not used).

        Any keyword arguments are set as attributes on the fake
        connection, letting a test override individual libvirt calls.
        """

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks: route the 'iscsi' volume driver to our local fake.
        volume_driver = ('iscsi=nova.tests.unit.virt.libvirt.test_driver'
                         '.FakeVolumeDriver')
        self.flags(volume_drivers=[volume_driver],
                   group='libvirt')
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
+
    def fake_lookup(self, instance_name):
        """Stand-in for lookupByName(): always return a fresh fake domain."""
        return FakeVirtDomain()
+
+ def fake_execute(self, *args, **kwargs):
+ open(args[-1], "a").close()
+
    def _create_service(self, **kwargs):
        """Build a nova-compute Service object; host/disabled overridable."""
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'disabled': kwargs.get('disabled', False),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0}

        return objects.Service(**service_ref)
+
    def _get_launch_flags(self, conn, network_info, power_on=True,
                          vifs_already_plugged=False):
        """Mirror the driver's choice of domain launch flags.

        If neutron VIF-plug events are expected, the domain is started
        paused (VIR_DOMAIN_START_PAUSED); otherwise flags are 0.
        """
        timeout = CONF.vif_plugging_timeout

        events = []
        if (conn._conn_supports_start_paused and
            utils.is_neutron() and
            not vifs_already_plugged and
            power_on and timeout):
            events = conn._get_neutron_events(network_info)

        # Old-style conditional expression: paused iff events expected.
        launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0

        return launch_flags
+
+ def test_public_api_signatures(self):
+ baseinst = driver.ComputeDriver(None)
+ inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertPublicAPISignatures(baseinst, inst)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_ok(self, mock_version):
+ mock_version.return_value = True
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_abort(self, mock_version):
+ mock_version.return_value = False
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertRaises(exception.NovaException,
+ drvr.init_host,
+ "dummyhost")
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_disable(self, mock_svc):
+ # Tests disabling an enabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(False)
+ self.assertTrue(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_enable(self, mock_svc):
+ # Tests enabling a disabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=True, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(True)
+ self.assertTrue(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
+ # Tests enabling an enabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=False, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(True)
+ self.assertFalse(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
+ # Tests disabling a disabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=True, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(False)
+ self.assertTrue(svc.disabled)
+
    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            # Must not raise despite the DB error.
            conn._set_host_enabled(False)
+
    def test_prepare_pci_device(self):
        """_prepare_pci_devices_for_use should look up each device by its
        hypervisor name and operate on it without error."""

        pci_devices = [dict(hypervisor_name='xxx')]

        self.flags(virt_type='xen', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        class FakeDev():
            # Node-device double; 'dettach' is libvirt's (misspelled) API.
            def attach(self):
                pass

            def dettach(self):
                pass

            def reset(self):
                pass

        self.mox.StubOutWithMock(self.conn, 'nodeDeviceLookupByName')
        # Two lookups are expected for the single device — presumably one
        # per operation the driver performs; confirm against the driver.
        self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
        self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
        self.mox.ReplayAll()
        conn._prepare_pci_devices_for_use(pci_devices)
+
    def test_prepare_pci_device_exception(self):
        """A libvirtError during dettach() surfaces as
        PciDevicePrepareFailed."""

        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]

        self.flags(virt_type='xen', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        class FakeDev():

            def attach(self):
                pass

            def dettach(self):
                # Simulate libvirt failing to detach the device.
                raise libvirt.libvirtError("xxxxx")

            def reset(self):
                pass

        self.stubs.Set(self.conn, 'nodeDeviceLookupByName',
                       lambda x: FakeDev())
        self.assertRaises(exception.PciDevicePrepareFailed,
                          conn._prepare_pci_devices_for_use, pci_devices)
+
    def test_detach_pci_devices_exception(self):
        """Detach fails fast when libvirt lacks the minimum version."""

        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_has_min_version')
        # Pretend the hot-unplug minimum version check fails.
        libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: False

        self.assertRaises(exception.PciDeviceDetachFailed,
                          conn._detach_pci_devices, None, pci_devices)
+
+ def test_detach_pci_devices(self):
+
+ fake_domXML1 =\
+ """<domain> <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none'/>
+ <source file='xxx'/>
+ <target dev='vda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x04' function='0x0'/>
+ </disk>
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address function="0x1" slot="0x10" domain="0x0000"
+ bus="0x04"/>
+ </source>
+ </hostdev></devices></domain>"""
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid',
+ address="0001:04:10:1")]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_get_guest_pci_device')
+
+ class FakeDev():
+ def to_xml(self):
+ pass
+
+ libvirt_driver.LibvirtDriver._get_guest_pci_device =\
+ lambda x, y: FakeDev()
+
+ class FakeDomain():
+ def detachDeviceFlags(self, xml, flag):
+ pci_devices[0]['hypervisor_name'] = 'marked'
+ pass
+
+ def XMLDesc(self, flag):
+ return fake_domXML1
+
+ conn._detach_pci_devices(FakeDomain(), pci_devices)
+ self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
+
    def test_detach_pci_devices_timeout(self):
        """If the hostdev never leaves the domain XML, detach times out
        with PciDeviceDetachFailed."""

        fake_domXML1 =\
            """<domain>
                <devices>
                  <hostdev mode="subsystem" type="pci" managed="yes">
                    <source>
            <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
                    </source>
                  </hostdev>
                </devices>
            </domain>"""

        # Note: this address matches the hostdev above, so the device is
        # always reported as still attached.
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0000:04:10:1")]

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_has_min_version')
        libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')

        class FakeDev():
            def to_xml(self):
                pass

        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()

        class FakeDomain():
            def detachDeviceFlags(self, xml, flag):
                # Deliberately do NOT remove the device from the XML.
                pass

            def XMLDesc(self, flag):
                return fake_domXML1
        self.assertRaises(exception.PciDeviceDetachFailed,
                          conn._detach_pci_devices, FakeDomain(), pci_devices)
+
+ def test_get_connector(self):
+ initiator = 'fake.initiator.iqn'
+ ip = 'fakeip'
+ host = 'fakehost'
+ wwpns = ['100010604b019419']
+ wwnns = ['200010604b019419']
+ self.flags(my_ip=ip)
+ self.flags(host=host)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ expected = {
+ 'ip': ip,
+ 'initiator': initiator,
+ 'host': host,
+ 'wwpns': wwpns,
+ 'wwnns': wwnns
+ }
+ volume = {
+ 'id': 'fake'
+ }
+ result = conn.get_volume_connector(volume)
+ self.assertThat(expected, matchers.DictMatches(result))
+
    def test_lifecycle_event_registration(self):
        """registerErrorHandler must be installed before host capabilities
        are queried during init_host."""
        calls = []

        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')

        def fake_get_host_capabilities(**args):
            # Return ARM capabilities; the arch itself is irrelevant, only
            # the ordering of the recorded calls matters.
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps

        @mock.patch.object(libvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(libvirt_driver.LibvirtDriver,
                           '_get_host_capabilities',
                           side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            conn.init_host("test_host")

        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)
+
+ @mock.patch.object(libvirt_driver, 'LOG')
+ def test_connect_auth_cb_exception(self, log_mock):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ creds = dict(authname='nova', password='verybadpass')
+ self.assertRaises(exception.NovaException,
+ conn._connect_auth_cb, creds, False)
+ self.assertEqual(0, len(log_mock.method_calls),
+ 'LOG should not be used in _connect_auth_cb.')
+
    def test_sanitize_log_to_xml(self):
        # setup fake data: block device info carrying a volume password.
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}

        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            # Assert only on the log line that contains the sensitive key.
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        with contextlib.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(conn, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            conn._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
+
    def test_close_callback(self):
        """The driver registers a close callback and, after the connection
        drops, disables the service and reconnects."""
        self.close_callback = None

        def set_close_callback(cb, opaque):
            # Capture the callback the driver registers so we can fire it.
            self.close_callback = cb

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(conn, "_connect", return_value=self.conn),
            mock.patch.object(self.conn, "registerCloseCallback",
                              side_effect=set_close_callback),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            conn._get_connection()
            self.assertFalse(service_mock.disabled)
            self.assertTrue(self.close_callback)
            conn._init_events_pipe()
            # Simulate the connection closing.
            self.close_callback(self.conn, 1, None)
            conn._dispatch_events()

            self.assertTrue(service_mock.disabled)
            conn._get_connection()
+
    def test_close_callback_bad_signature(self):
        '''Validates that a connection to libvirt exist,
        even when registerCloseCallback method has a different
        number of arguments in the libvirt python library.
        '''
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(conn, "_connect", return_value=self.conn),
            # Older libvirt bindings raise TypeError on a signature mismatch.
            mock.patch.object(self.conn, "registerCloseCallback",
                              side_effect=TypeError('dd')),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            connection = conn._get_connection()
            self.assertTrue(connection)
+
    def test_close_callback_not_defined(self):
        '''Validates that a connection to libvirt exist,
        even when registerCloseCallback method missing from
        the libvirt python library.
        '''
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(conn, "_connect", return_value=self.conn),
            # Very old libvirt bindings lack the attribute entirely.
            mock.patch.object(self.conn, "registerCloseCallback",
                              side_effect=AttributeError('dd')),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            connection = conn._get_connection()
            self.assertTrue(connection)
+
+    def test_cpu_features_bug_1217630(self):
+        # Regression test for bug 1217630: the 'aes' feature should be
+        # reported only when libvirt exposes
+        # VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        # Test old version of libvirt, it shouldn't see the `aes' feature
+        with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+            del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+            caps = conn._get_host_capabilities()
+            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+        # Test new version of libvirt, should find the `aes' feature
+        with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+            mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
+            # Cleanup the capabilities cache firstly
+            conn._caps = None
+            caps = conn._get_host_capabilities()
+            self.assertIn('aes', [x.name for x in caps.host.cpu.features])
+
+    def test_cpu_features_are_not_duplicated(self):
+        # The 'hypervisor' feature must appear exactly once regardless
+        # of whether libvirt supports baseline-CPU feature expansion.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        # Test old version of libvirt. Should return single 'hypervisor'
+        with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+            del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+            caps = conn._get_host_capabilities()
+            cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
+            self.assertEqual(1, cnt)
+
+        # Test new version of libvirt. Should still return single 'hypervisor'
+        with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+            mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
+            # Cleanup the capabilities cache firstly
+            conn._caps = None
+            caps = conn._get_host_capabilities()
+            cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
+            self.assertEqual(1, cnt)
+
+    def test_baseline_cpu_not_supported(self):
+        # When baselineCPU raises VIR_ERR_NO_SUPPORT the driver should
+        # still return host capabilities (without expanded features);
+        # any other libvirt error must propagate to the caller.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        # `mock` has trouble stubbing attributes that don't exist yet, so
+        # fallback to plain-Python attribute setting/deleting
+        cap_str = 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'
+        if not hasattr(libvirt_driver.libvirt, cap_str):
+            setattr(libvirt_driver.libvirt, cap_str, True)
+            self.addCleanup(delattr, libvirt_driver.libvirt, cap_str)
+
+        # Handle just the NO_SUPPORT error
+        not_supported_exc = fakelibvirt.make_libvirtError(
+                libvirt.libvirtError,
+                'this function is not supported by the connection driver:'
+                ' virConnectBaselineCPU',
+                error_code=libvirt.VIR_ERR_NO_SUPPORT)
+
+        with mock.patch.object(conn._conn, 'baselineCPU',
+                               side_effect=not_supported_exc):
+            caps = conn._get_host_capabilities()
+            self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
+            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+        # Clear cached result so we can test again...
+        conn._caps = None
+
+        # Other errors should not be caught
+        other_exc = fakelibvirt.make_libvirtError(
+            libvirt.libvirtError,
+            'other exc',
+            error_code=libvirt.VIR_ERR_NO_DOMAIN)
+
+        with mock.patch.object(conn._conn, 'baselineCPU',
+                               side_effect=other_exc):
+            self.assertRaises(libvirt.libvirtError,
+                              conn._get_host_capabilities)
+
+    def test_lxc_get_host_capabilities_failed(self):
+        # A -1 return from baselineCPU (failure) must not break
+        # capability retrieval; features simply remain unexpanded.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        with mock.patch.object(conn._conn, 'baselineCPU', return_value=-1):
+            setattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', 1)
+            caps = conn._get_host_capabilities()
+            delattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
+            self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
+            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    @mock.patch.object(time, "time")
+    def test_get_guest_config(self, time_mock, mock_flavor):
+        # End-to-end check of _get_guest_config: the device layout and
+        # the Nova instance metadata (owner, flavor, creation time)
+        # embedded in the generated guest config.
+        time_mock.return_value = 1234567.89
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["display_name"] = "purple tomatoes"
+
+        ctxt = context.RequestContext(project_id=123,
+                                      project_name="aubergine",
+                                      user_id=456,
+                                      user_name="pie")
+
+        flavor = objects.Flavor(name='m1.small',
+                                memory_mb=6,
+                                vcpus=28,
+                                root_gb=496,
+                                ephemeral_gb=8128,
+                                swap=33550336,
+                                extra_specs={})
+        instance_ref = objects.Instance(**test_instance)
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        cfg = conn._get_guest_config(instance_ref,
+                                     _fake_network_info(self.stubs, 1),
+                                     {}, disk_info,
+                                     context=ctxt)
+
+        self.assertEqual(cfg.uuid, instance_ref["uuid"])
+        self.assertEqual(cfg.pae, False)
+        self.assertEqual(cfg.acpi, True)
+        self.assertEqual(cfg.apic, True)
+        self.assertEqual(cfg.memory, 6 * units.Ki)
+        self.assertEqual(cfg.vcpus, 28)
+        self.assertEqual(cfg.os_type, vm_mode.HVM)
+        self.assertEqual(cfg.os_boot_dev, ["hd"])
+        self.assertIsNone(cfg.os_root)
+        self.assertEqual(len(cfg.devices), 9)
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestInterface)
+        self.assertIsInstance(cfg.devices[3],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[4],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[5],
+                              vconfig.LibvirtConfigGuestInput)
+        self.assertIsInstance(cfg.devices[6],
+                              vconfig.LibvirtConfigGuestGraphics)
+        self.assertIsInstance(cfg.devices[7],
+                              vconfig.LibvirtConfigGuestVideo)
+        self.assertIsInstance(cfg.devices[8],
+                              vconfig.LibvirtConfigMemoryBalloon)
+        self.assertEqual(len(cfg.metadata), 1)
+        self.assertIsInstance(cfg.metadata[0],
+                              vconfig.LibvirtConfigGuestMetaNovaInstance)
+        self.assertEqual(version.version_string_with_package(),
+                         cfg.metadata[0].package)
+        self.assertEqual("purple tomatoes",
+                         cfg.metadata[0].name)
+        self.assertEqual(1234567.89,
+                         cfg.metadata[0].creationTime)
+        self.assertEqual("image",
+                         cfg.metadata[0].roottype)
+        self.assertEqual(str(instance_ref["image_ref"]),
+                         cfg.metadata[0].rootid)
+
+        self.assertIsInstance(cfg.metadata[0].owner,
+                              vconfig.LibvirtConfigGuestMetaNovaOwner)
+        self.assertEqual(456,
+                         cfg.metadata[0].owner.userid)
+        self.assertEqual("pie",
+                         cfg.metadata[0].owner.username)
+        self.assertEqual(123,
+                         cfg.metadata[0].owner.projectid)
+        self.assertEqual("aubergine",
+                         cfg.metadata[0].owner.projectname)
+
+        self.assertIsInstance(cfg.metadata[0].flavor,
+                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
+        self.assertEqual("m1.small",
+                         cfg.metadata[0].flavor.name)
+        self.assertEqual(6,
+                         cfg.metadata[0].flavor.memory)
+        self.assertEqual(28,
+                         cfg.metadata[0].flavor.vcpus)
+        self.assertEqual(496,
+                         cfg.metadata[0].flavor.disk)
+        self.assertEqual(8128,
+                         cfg.metadata[0].flavor.ephemeral)
+        self.assertEqual(33550336,
+                         cfg.metadata[0].flavor.swap)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_lxc(self, mock_flavor):
+        # With virt_type=lxc the guest uses EXE mode with /sbin/init
+        # and a filesystem device instead of disks.
+        self.flags(virt_type='lxc', group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        cfg = conn._get_guest_config(instance_ref,
+                                     _fake_network_info(self.stubs, 1),
+                                     None, {'mapping': {}})
+        self.assertEqual(instance_ref["uuid"], cfg.uuid)
+        self.assertEqual(2 * units.Mi, cfg.memory)
+        self.assertEqual(1, cfg.vcpus)
+        self.assertEqual(vm_mode.EXE, cfg.os_type)
+        self.assertEqual("/sbin/init", cfg.os_init_path)
+        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+        self.assertIsNone(cfg.os_root)
+        self.assertEqual(3, len(cfg.devices))
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestFilesys)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestInterface)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestConsole)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_lxc_with_id_maps(self, mock_flavor):
+        # Same as the plain lxc test, plus uid/gid map flags which must
+        # surface as UID/GID idmap entries in the guest config.
+        self.flags(virt_type='lxc', group='libvirt')
+        self.flags(uid_maps=['0:1000:100'], group='libvirt')
+        self.flags(gid_maps=['0:1000:100'], group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        cfg = conn._get_guest_config(instance_ref,
+                                     _fake_network_info(self.stubs, 1),
+                                     None, {'mapping': {}})
+        self.assertEqual(instance_ref["uuid"], cfg.uuid)
+        self.assertEqual(2 * units.Mi, cfg.memory)
+        self.assertEqual(1, cfg.vcpus)
+        self.assertEqual(vm_mode.EXE, cfg.os_type)
+        self.assertEqual("/sbin/init", cfg.os_init_path)
+        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+        self.assertIsNone(cfg.os_root)
+        self.assertEqual(3, len(cfg.devices))
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestFilesys)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestInterface)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestConsole)
+        self.assertEqual(len(cfg.idmaps), 2)
+        self.assertIsInstance(cfg.idmaps[0],
+                              vconfig.LibvirtConfigGuestUIDMap)
+        self.assertIsInstance(cfg.idmaps[1],
+                              vconfig.LibvirtConfigGuestGIDMap)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_numa_host_instance_fits(self, mock_flavor):
+        # Instance fits in a single host NUMA cell: the driver should
+        # pin to that cell's cpuset and emit no cputune/NUMA config.
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
+                                ephemeral_gb=8128, swap=33550336, name='fake',
+                                extra_specs={})
+        mock_flavor.return_value = flavor
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = self._fake_caps_numa_topology()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        with contextlib.nested(
+                mock.patch.object(conn, '_has_min_version', return_value=True),
+                mock.patch.object(
+                    conn, "_get_host_capabilities", return_value=caps),
+                mock.patch.object(
+                        random, 'choice', side_effect=lambda cells: cells[0])):
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+            self.assertEqual(set([0, 1]), cfg.cpuset)
+            self.assertIsNone(cfg.cputune)
+            self.assertIsNone(cfg.cpu.numa)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_numa_host_instance_no_fit(self, mock_flavor):
+        # Instance too big for any single NUMA cell: no cell choice is
+        # made (random.choice untouched) and the vcpu pin set is used.
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
+                                ephemeral_gb=8128, swap=33550336, name='fake',
+                                extra_specs={})
+        mock_flavor.return_value = flavor
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = self._fake_caps_numa_topology()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        with contextlib.nested(
+                mock.patch.object(
+                    conn, "_get_host_capabilities", return_value=caps),
+                mock.patch.object(
+                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
+                mock.patch.object(random, 'choice')
+            ) as (get_host_cap_mock,
+                  get_vcpu_pin_set_mock, choice_mock):
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+            self.assertFalse(choice_mock.called)
+            self.assertEqual(set([3]), cfg.cpuset)
+            self.assertIsNone(cfg.cputune)
+            self.assertIsNone(cfg.cpu.numa)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self,
+                                                                  mock_flavor):
+        # Instance fits in a cell AND a vcpu pin set is configured: the
+        # candidate cells offered to random.choice must be intersected
+        # with the pin set.
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
+                                ephemeral_gb=8128, swap=33550336, name='fake',
+                                extra_specs={})
+        mock_flavor.return_value = flavor
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = self._fake_caps_numa_topology()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        with contextlib.nested(
+                mock.patch.object(conn, '_has_min_version', return_value=True),
+                mock.patch.object(
+                    conn, "_get_host_capabilities", return_value=caps),
+                mock.patch.object(
+                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
+                mock.patch.object(
+                        random, 'choice', side_effect=lambda cells: cells[0])
+            ) as (has_min_version_mock, get_host_cap_mock,
+                        get_vcpu_pin_set_mock, choice_mock):
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+            # NOTE(ndipanov): we make sure that pin_set was taken into account
+            # when choosing viable cells
+            choice_mock.assert_called_once_with([set([2, 3])])
+            self.assertEqual(set([2, 3]), cfg.cpuset)
+            self.assertIsNone(cfg.cputune)
+            self.assertIsNone(cfg.cpu.numa)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_non_numa_host_instance_topo(self, mock_flavor):
+        # Host reports no NUMA topology but the instance has one: the
+        # guest CPU config should carry the instance's NUMA cells while
+        # cpuset/cputune remain unset.
+        instance_topology = objects.InstanceNUMATopology.obj_from_topology(
+                hardware.VirtNUMAInstanceTopology(
+                    cells=[hardware.VirtNUMATopologyCellInstance(
+                        0, set([0]), 1024),
+                           hardware.VirtNUMATopologyCellInstance(
+                        1, set([2]), 1024)]))
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.numa_topology = instance_topology
+        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
+                                ephemeral_gb=8128, swap=33550336, name='fake',
+                                extra_specs={})
+        mock_flavor.return_value = flavor
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = None
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        with contextlib.nested(
+                mock.patch.object(
+                    objects.InstanceNUMATopology, "get_by_instance_uuid",
+                    return_value=instance_topology),
+                mock.patch.object(conn, '_has_min_version', return_value=True),
+                mock.patch.object(
+                    conn, "_get_host_capabilities", return_value=caps)):
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+            self.assertIsNone(cfg.cpuset)
+            self.assertIsNone(cfg.cputune)
+            self.assertIsNotNone(cfg.cpu.numa)
+            for instance_cell, numa_cfg_cell in zip(
+                    instance_topology.cells, cfg.cpu.numa.cells):
+                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
+                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
+                self.assertEqual(instance_cell.memory * units.Ki,
+                                 numa_cfg_cell.memory)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_numa_host_instance_topo(self, mock_flavor):
+        # Host and instance both have NUMA topologies: each vcpu must
+        # be pinned only to host CPUs allowed by the vcpu pin set, and
+        # the guest NUMA cells must mirror the instance topology.
+        instance_topology = objects.InstanceNUMATopology.obj_from_topology(
+                hardware.VirtNUMAInstanceTopology(
+                    cells=[hardware.VirtNUMATopologyCellInstance(
+                        0, set([0, 1]), 1024),
+                           hardware.VirtNUMATopologyCellInstance(
+                               1, set([2, 3]),
+                               1024)]))
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.numa_topology = instance_topology
+        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
+                                ephemeral_gb=8128, swap=33550336, name='fake',
+                                extra_specs={})
+        mock_flavor.return_value = flavor
+
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = "x86_64"
+        caps.host.topology = self._fake_caps_numa_topology()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        with contextlib.nested(
+                mock.patch.object(
+                    objects.Flavor, "get_by_id", return_value=flavor),
+                mock.patch.object(
+                    objects.InstanceNUMATopology, "get_by_instance_uuid",
+                    return_value=instance_topology),
+                mock.patch.object(conn, '_has_min_version', return_value=True),
+                mock.patch.object(
+                    conn, "_get_host_capabilities", return_value=caps),
+                mock.patch.object(
+                    hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 2]))
+                ):
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+            self.assertIsNone(cfg.cpuset)
+            # Test that the pinning is correct and limited to allowed only
+            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
+            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[0].cpuset)
+            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
+            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[1].cpuset)
+            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
+            self.assertEqual(set([2]), cfg.cputune.vcpupin[2].cpuset)
+            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
+            self.assertEqual(set([2]), cfg.cputune.vcpupin[3].cpuset)
+            self.assertIsNotNone(cfg.cpu.numa)
+            for instance_cell, numa_cfg_cell in zip(
+                    instance_topology.cells, cfg.cpu.numa.cells):
+                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
+                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
+                self.assertEqual(instance_cell.memory * units.Ki,
+                                 numa_cfg_cell.memory)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_clock(self, mock_flavor):
+        # Clock config per guest architecture: pit and rtc timers
+        # always present; a disabled hpet timer added only on x86.
+        self.flags(virt_type='kvm', group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        image_meta = {}
+        # Expected hpet presence keyed by guest architecture.
+        hpet_map = {
+            arch.X86_64: True,
+            arch.I686: True,
+            arch.PPC: False,
+            arch.PPC64: False,
+            arch.ARMV7: False,
+            arch.AARCH64: False,
+            }
+
+        for guestarch, expect_hpet in hpet_map.items():
+            with mock.patch.object(libvirt_driver.libvirt_utils,
+                                   'get_arch',
+                                   return_value=guestarch):
+                cfg = conn._get_guest_config(instance_ref, [],
+                                             image_meta,
+                                             disk_info)
+                self.assertIsInstance(cfg.clock,
+                                      vconfig.LibvirtConfigGuestClock)
+                self.assertEqual(cfg.clock.offset, "utc")
+                self.assertIsInstance(cfg.clock.timers[0],
+                                      vconfig.LibvirtConfigGuestTimer)
+                self.assertIsInstance(cfg.clock.timers[1],
+                                      vconfig.LibvirtConfigGuestTimer)
+                self.assertEqual(cfg.clock.timers[0].name, "pit")
+                self.assertEqual(cfg.clock.timers[0].tickpolicy,
+                                      "delay")
+                self.assertEqual(cfg.clock.timers[1].name, "rtc")
+                self.assertEqual(cfg.clock.timers[1].tickpolicy,
+                                      "catchup")
+                if expect_hpet:
+                    self.assertEqual(3, len(cfg.clock.timers))
+                    self.assertIsInstance(cfg.clock.timers[2],
+                                          vconfig.LibvirtConfigGuestTimer)
+                    self.assertEqual('hpet', cfg.clock.timers[2].name)
+                    self.assertFalse(cfg.clock.timers[2].present)
+                else:
+                    self.assertEqual(2, len(cfg.clock.timers))
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_windows(self, mock_flavor):
+        # Windows guests get a localtime clock offset instead of utc.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref['os_type'] = 'windows'
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        cfg = conn._get_guest_config(instance_ref,
+                                     _fake_network_info(self.stubs, 1),
+                                     {}, disk_info)
+
+        self.assertIsInstance(cfg.clock,
+                              vconfig.LibvirtConfigGuestClock)
+        self.assertEqual(cfg.clock.offset, "localtime")
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_two_nics(self, mock_flavor):
+        # Two NICs in the network info yield two interface devices in
+        # the guest config (10 devices total).
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        cfg = conn._get_guest_config(instance_ref,
+                                     _fake_network_info(self.stubs, 2),
+                                     {}, disk_info)
+        self.assertEqual(cfg.acpi, True)
+        self.assertEqual(cfg.memory, 2 * units.Mi)
+        self.assertEqual(cfg.vcpus, 1)
+        self.assertEqual(cfg.os_type, vm_mode.HVM)
+        self.assertEqual(cfg.os_boot_dev, ["hd"])
+        self.assertIsNone(cfg.os_root)
+        self.assertEqual(len(cfg.devices), 10)
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestInterface)
+        self.assertIsInstance(cfg.devices[3],
+                              vconfig.LibvirtConfigGuestInterface)
+        self.assertIsInstance(cfg.devices[4],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[5],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[6],
+                              vconfig.LibvirtConfigGuestInput)
+        self.assertIsInstance(cfg.devices[7],
+                              vconfig.LibvirtConfigGuestGraphics)
+        self.assertIsInstance(cfg.devices[8],
+                              vconfig.LibvirtConfigGuestVideo)
+        self.assertIsInstance(cfg.devices[9],
+                              vconfig.LibvirtConfigMemoryBalloon)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_bug_1118829(self, mock_flavor):
+        # Regression test for bug 1118829: building the guest config
+        # must set the instance's root_device_name.
+        self.flags(virt_type='uml', group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = {'disk_bus': 'virtio',
+                     'cdrom_bus': 'ide',
+                     'mapping': {u'vda': {'bus': 'virtio',
+                                          'type': 'disk',
+                                          'dev': u'vda'},
+                                 'root': {'bus': 'virtio',
+                                          'type': 'disk',
+                                          'dev': 'vda'}}}
+
+        # NOTE(jdg): For this specific test leave this blank
+        # This will exercise the failed code path still,
+        # and won't require fakes and stubs of the iscsi discovery
+        block_device_info = {}
+        conn._get_guest_config(instance_ref, [], {}, disk_info,
+                               None, block_device_info)
+        self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_root_device_name(self, mock_flavor):
+        # A root_device_name in block_device_info must be reflected in
+        # the guest's os_root setting.
+        self.flags(virt_type='uml', group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        block_device_info = {'root_device_name': '/dev/vdb'}
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref,
+                                            block_device_info)
+        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+                                     None, block_device_info)
+        self.assertEqual(cfg.acpi, False)
+        self.assertEqual(cfg.memory, 2 * units.Mi)
+        self.assertEqual(cfg.vcpus, 1)
+        self.assertEqual(cfg.os_type, "uml")
+        self.assertEqual(cfg.os_boot_dev, [])
+        self.assertEqual(cfg.os_root, '/dev/vdb')
+        self.assertEqual(len(cfg.devices), 3)
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestConsole)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_block_device(self, mock_flavor):
+        # Attached volumes appear as disk devices with their requested
+        # target names, and each mapping is saved after attachment.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+        conn_info = {'driver_volume_type': 'fake'}
+        info = {'block_device_mapping': driver_block_device.convert_volumes([
+                    fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 1,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                         'device_name': '/dev/vdc'}),
+                    fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 2,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                         'device_name': '/dev/vdd'}),
+                ])}
+        info['block_device_mapping'][0]['connection_info'] = conn_info
+        info['block_device_mapping'][1]['connection_info'] = conn_info
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref, info)
+        with mock.patch.object(
+                driver_block_device.DriverVolumeBlockDevice, 'save'):
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+                                         None, info)
+            self.assertIsInstance(cfg.devices[2],
+                                  vconfig.LibvirtConfigGuestDisk)
+            self.assertEqual(cfg.devices[2].target_dev, 'vdc')
+            self.assertIsInstance(cfg.devices[3],
+                                  vconfig.LibvirtConfigGuestDisk)
+            self.assertEqual(cfg.devices[3].target_dev, 'vdd')
+            self.assertTrue(info['block_device_mapping'][0].save.called)
+            self.assertTrue(info['block_device_mapping'][1].save.called)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_lxc_with_attached_volume(self, mock_flavor):
+        # For LXC guests, non-boot volumes (vdc, vdd) become disk
+        # devices; the boot volume (boot_index 0) does not.
+        self.flags(virt_type='lxc', group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+        conn_info = {'driver_volume_type': 'fake'}
+        info = {'block_device_mapping': driver_block_device.convert_volumes([
+                  fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 1,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                         'boot_index': 0}),
+                  fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 2,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                        }),
+                  fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 3,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                        }),
+               ])}
+
+        info['block_device_mapping'][0]['connection_info'] = conn_info
+        info['block_device_mapping'][1]['connection_info'] = conn_info
+        info['block_device_mapping'][2]['connection_info'] = conn_info
+        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
+        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
+        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
+        with mock.patch.object(
+                driver_block_device.DriverVolumeBlockDevice, 'save'):
+            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                                instance_ref, info)
+            cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+                                         None, info)
+            self.assertIsInstance(cfg.devices[1],
+                                  vconfig.LibvirtConfigGuestDisk)
+            self.assertEqual(cfg.devices[1].target_dev, 'vdc')
+            self.assertIsInstance(cfg.devices[2],
+                                  vconfig.LibvirtConfigGuestDisk)
+            self.assertEqual(cfg.devices[2].target_dev, 'vdd')
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_configdrive(self, mock_flavor):
+        # It's necessary to check if the architecture is power, because
+        # power doesn't have support to ide, and so libvirt translate
+        # all ide calls to scsi
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        # make configdrive.required_by() return True
+        instance_ref['config_drive'] = True
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+        # The last device is selected for this. on x86 is the last ide
+        # device (hdd). Since power only support scsi, the last device
+        # is sdz
+
+        expect = {"ppc": "sdz", "ppc64": "sdz"}
+        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertEqual(cfg.devices[2].target_dev, disk)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_virtio_scsi_bus(self, mock_flavor):
+        # The hw_scsi_model=virtio-scsi image property must add a
+        # virtio-scsi controller device to the guest config.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref, [], image_meta)
+        cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestController)
+        self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_virtio_scsi_bus_bdm(self, mock_flavor):
+        # With virtio-scsi plus scsi-bus block device mappings, the
+        # attached volumes use the scsi target bus and the virtio-scsi
+        # controller follows them in the device list.
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+        image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+        conn_info = {'driver_volume_type': 'fake'}
+        bd_info = {
+            'block_device_mapping': driver_block_device.convert_volumes([
+                    fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 1,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                         'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
+                    fake_block_device.FakeDbBlockDeviceDict(
+                        {'id': 2,
+                         'source_type': 'volume', 'destination_type': 'volume',
+                         'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
+                ])}
+        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
+        bd_info['block_device_mapping'][1]['connection_info'] = conn_info
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref, bd_info, image_meta)
+        with mock.patch.object(
+                driver_block_device.DriverVolumeBlockDevice, 'save'):
+            cfg = conn._get_guest_config(instance_ref, [], image_meta,
+                                         disk_info, [], bd_info)
+            self.assertIsInstance(cfg.devices[2],
+                                  vconfig.LibvirtConfigGuestDisk)
+            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
+            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
+            self.assertIsInstance(cfg.devices[3],
+                                  vconfig.LibvirtConfigGuestDisk)
+            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
+            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
+            self.assertIsInstance(cfg.devices[4],
+                                  vconfig.LibvirtConfigGuestController)
+            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_vnc(self, mock_flavor):
+        # VNC enabled without a USB tablet: the graphics device is vnc
+        # and no input device is added (7 devices total).
+        self.flags(vnc_enabled=True)
+        self.flags(virt_type='kvm',
+                   use_usb_tablet=False,
+                   group='libvirt')
+        self.flags(enabled=False, group='spice')
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+        self.assertEqual(len(cfg.devices), 7)
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[3],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[4],
+                              vconfig.LibvirtConfigGuestGraphics)
+        self.assertIsInstance(cfg.devices[5],
+                              vconfig.LibvirtConfigGuestVideo)
+        self.assertIsInstance(cfg.devices[6],
+                              vconfig.LibvirtConfigMemoryBalloon)
+
+        self.assertEqual(cfg.devices[4].type, "vnc")
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_config_with_vnc_and_tablet(self, mock_flavor):
+        # VNC enabled with use_usb_tablet: a tablet input device is
+        # added before the vnc graphics device (8 devices total).
+        self.flags(vnc_enabled=True)
+        self.flags(virt_type='kvm',
+                   use_usb_tablet=True,
+                   group='libvirt')
+        self.flags(enabled=False, group='spice')
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+        self.assertEqual(len(cfg.devices), 8)
+        self.assertIsInstance(cfg.devices[0],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[1],
+                              vconfig.LibvirtConfigGuestDisk)
+        self.assertIsInstance(cfg.devices[2],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[3],
+                              vconfig.LibvirtConfigGuestSerial)
+        self.assertIsInstance(cfg.devices[4],
+                              vconfig.LibvirtConfigGuestInput)
+        self.assertIsInstance(cfg.devices[5],
+                              vconfig.LibvirtConfigGuestGraphics)
+        self.assertIsInstance(cfg.devices[6],
+                              vconfig.LibvirtConfigGuestVideo)
+        self.assertIsInstance(cfg.devices[7],
+                              vconfig.LibvirtConfigMemoryBalloon)
+
+        self.assertEqual(cfg.devices[4].type, "tablet")
+        self.assertEqual(cfg.devices[5].type, "vnc")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_spice_and_tablet(self, mock_flavor):
        """SPICE enabled without the agent plus USB tablet: expect a
        tablet input device and a "spice" graphics device (8 devices).
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=False,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "spice")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_spice_and_agent(self, mock_flavor):
        """SPICE with the agent enabled: the tablet is replaced by a
        spicevmc channel targeting "com.redhat.spice.0" and the video
        device is "qxl" (8 devices).
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console(self, acquire_port,
                                             mock_flavor):
        """With serial_console enabled, the first serial device becomes
        a TCP console bound to the port handed out by acquire_port.
        """
        self.flags(enabled=True, group='serial_console')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        # Stub the port allocator so the assertion below is deterministic.
        acquire_port.return_value = 11111

        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(8, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual(11111, cfg.devices[2].listen_port)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_serial_console_through_flavor(self, mock_flavor):
        """A flavor extra spec hw:serial_port_count of 3 yields three TCP
        serial devices in the guest config (10 devices total).
        """
        self.flags(enabled=True, group='serial_console')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {'hw:serial_port_count': 3}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(10, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_serial_console_invalid_flavor(self, mock_flavor):
        """A non-numeric hw:serial_port_count extra spec makes
        _get_guest_config raise ImageSerialPortNumberInvalid.
        """
        self.flags(enabled=True, group='serial_console')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {'hw:serial_port_count': "a"}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        self.assertRaises(
            exception.ImageSerialPortNumberInvalid,
            conn._get_guest_config, instance_ref, [], {}, disk_info)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_serial_console_image_and_flavor(self,
                                                              mock_flavor):
        """When both image meta (3 ports) and flavor (4 ports) specify a
        serial port count, the image value wins: three TCP serial
        devices appear in the config.
        """
        self.flags(enabled=True, group='serial_console')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = {"properties": {"hw_serial_port_count": "3"}}
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {'hw:serial_port_count': 4}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        cfg = conn._get_guest_config(instance_ref, [], image_meta,
                                     disk_info)
        # Third argument is the failure message: dump the devices on mismatch.
        self.assertEqual(10, len(cfg.devices), cfg.devices)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_serial_console_invalid_img_meta(self,
                                                              mock_flavor):
        """A non-numeric hw_serial_port_count image property makes
        _get_guest_config raise ImageSerialPortNumberInvalid.
        """
        self.flags(enabled=True, group='serial_console')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_serial_port_count": "fail"}}
        self.assertRaises(
            exception.ImageSerialPortNumberInvalid,
            conn._get_guest_config, instance_ref, [], image_meta, disk_info)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console_through_port_rng_exhausted(
            self, acquire_port, mock_flavor):
        """If the serial console port allocator has no ports left, the
        SocketPortRangeExhaustedException propagates out of
        _get_guest_config.
        """
        self.flags(enabled=True, group='serial_console')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
            '127.0.0.1')
        self.assertRaises(
            exception.SocketPortRangeExhaustedException,
            conn._get_guest_config, instance_ref, [], {}, disk_info)
+
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
    def test_get_serial_ports_from_instance(self, _lookup_by_name):
        """With no mode filter, all four (host, port) pairs from the
        domain XML are returned.
        """
        i = self._test_get_serial_ports_from_instance(_lookup_by_name)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))
+
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
    def test_get_serial_ports_from_instance_bind_only(self, _lookup_by_name):
        """mode='bind' returns only the sources whose XML mode is "bind"."""
        i = self._test_get_serial_ports_from_instance(
            _lookup_by_name, mode='bind')
        self.assertEqual([
            ('127.0.0.1', 101),
            ('127.0.0.2', 100)], list(i))
+
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
    def test_get_serial_ports_from_instance_connect_only(self,
                                                         _lookup_by_name):
        """mode='connect' returns only the sources whose XML mode is
        "connect".
        """
        i = self._test_get_serial_ports_from_instance(
            _lookup_by_name, mode='connect')
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.2', 101)], list(i))
+
    def _test_get_serial_ports_from_instance(self, _lookup_by_name, mode=None):
        """Shared helper: stub a domain whose XML has four TCP serial
        devices (two "bind", two "connect") and return the iterator
        produced by _get_serial_ports_from_instance for *mode*.
        """
        xml = """
        <domain type='kvm'>
          <devices>
            <serial type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </serial>
            <serial type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </serial>
            <serial type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </serial>
            <serial type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </serial>
          </devices>
        </domain>"""

        dom = mock.MagicMock()
        dom.XMLDesc.return_value = xml
        _lookup_by_name.return_value = dom

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        return conn._get_serial_ports_from_instance(
            {'name': 'fake_instance'}, mode=mode)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_type_xen(self, mock_flavor):
        """virt_type=xen uses a console device instead of serial ports,
        VNC graphics and a "xen" video device (6 devices total).
        """
        self.flags(vnc_enabled=True)
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 6)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[3].type, "vnc")
        self.assertEqual(cfg.devices[4].type, "xen")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_type_xen_pae_hvm(self, mock_flavor):
        """Xen with vm_mode=HVM sets os_type to HVM, points os_loader at
        the configured hvmloader and enables PAE.
        """
        self.flags(vnc_enabled=True)
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['vm_mode'] = vm_mode.HVM
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)

        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
        self.assertEqual(cfg.pae, True)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_type_xen_pae_pvm(self, mock_flavor):
        """Xen without an explicit vm_mode defaults os_type to XEN
        (paravirt) and enables PAE.
        """
        self.flags(vnc_enabled=True)
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)

        self.assertEqual(cfg.os_type, vm_mode.XEN)
        self.assertEqual(cfg.pae, True)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_vnc_and_spice(self, mock_flavor):
        """VNC and SPICE (with agent) both enabled: expect the tablet,
        the spice channel and two graphics devices — "vnc" then
        "spice" (10 devices total).
        """
        self.flags(vnc_enabled=True)
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[6].type, "vnc")
        self.assertEqual(cfg.devices[7].type, "spice")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_invalid_watchdog_action(self, mock_flavor):
        """An unknown hw_watchdog_action image property makes
        _get_guest_config raise InvalidWatchdogAction.
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_watchdog_action": "something"}}
        self.assertRaises(exception.InvalidWatchdogAction,
                          conn._get_guest_config,
                          instance_ref,
                          [],
                          image_meta,
                          disk_info)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_watchdog_action_image_meta(self,
                                                              mock_flavor):
        """hw_watchdog_action="none" in image meta adds a watchdog device
        with action "none" (9 devices total).
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_watchdog_action": "none"}}
        cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("none", cfg.devices[7].action)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def _test_get_guest_config_with_watchdog_action_flavor(self, mock_flavor,
            hw_watchdog_action="hw:watchdog_action"):
        """Shared helper: set the watchdog action to 'none' through a
        flavor extra spec keyed by *hw_watchdog_action* (scoped or
        legacy unscoped name) and verify a watchdog device with action
        "none" is produced.
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {hw_watchdog_action: 'none'}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)

        self.assertEqual(9, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("none", cfg.devices[7].action)
+
    def test_get_guest_config_with_watchdog_action_through_flavor(self):
        """Watchdog action via the scoped 'hw:watchdog_action' spec."""
        self._test_get_guest_config_with_watchdog_action_flavor()
+
    # TODO(pkholkin): this test, which accepts the old unscoped property
    # name 'hw_watchdog_action', should be removed in the next release
    def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
            self):
        """Watchdog action via the legacy unscoped 'hw_watchdog_action'
        flavor extra spec.
        """
        self._test_get_guest_config_with_watchdog_action_flavor(
            hw_watchdog_action="hw_watchdog_action")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_watchdog_overrides_flavor(self,
                                                             mock_flavor):
        """When both flavor ('none') and image meta ('pause') define a
        watchdog action, the image meta value wins.
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {'hw_watchdog_action': 'none'}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        image_meta = {"properties": {"hw_watchdog_action": "pause"}}

        cfg = conn._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(9, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("pause", cfg.devices[7].action)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_unsupported_video_driver_through_image_meta(self,
                                                         mock_flavor):
        """An unknown hw_video_model image property makes
        _get_guest_config raise InvalidVideoMode.
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_video_model": "something"}}
        self.assertRaises(exception.InvalidVideoMode,
                          conn._get_guest_config,
                          instance_ref,
                          [],
                          image_meta,
                          disk_info)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_video_driver_image_meta(self,
                                                           mock_flavor):
        """hw_video_model="vmvga" in image meta selects the vmvga video
        device while graphics stays "vnc".
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_video_model": "vmvga"}}
        cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[6].type, "vmvga")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_qga_through_image_meta(self,
                                                          mock_flavor):
        """hw_qemu_guest_agent="yes" adds a unix channel targeting
        "org.qemu.guest_agent.0" (9 devices total).
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
        cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[7].type, "unix")
        self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_video_driver_vram(self, mock_flavor):
        """With a flavor vram cap of 100 and image hw_video_ram of 64,
        the qxl video device gets vram 64 (within the cap).
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_video_model": "qxl",
                                     "hw_video_ram": "64"}}

        cfg = conn._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
        self.assertEqual(cfg.devices[6].vram, 64)
+
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        conn.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        conn.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
            instance_id='foo')
        # Make domain definition fail after the container FS was set up.
        conn._conn.defineXML = mock.Mock()
        conn._conn.defineXML.side_effect = ValueError('somethingbad')
        with contextlib.nested(
                mock.patch.object(conn, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(conn, 'plug_vifs'),
                mock.patch.object(conn, 'firewall_driver'),
                mock.patch.object(conn, 'cleanup')):
            self.assertRaises(ValueError,
                              conn._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None)

            # The mounted rootfs must be torn down despite the failure.
            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
+
    def test_video_driver_flavor_limit_not_set(self):
        """Requesting hw_video_ram via image meta without the flavor's
        hw_video:ram_max_mb cap raises RequestedVRamTooHigh.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_video_model": "qxl",
                                     "hw_video_ram": "64"}}

        with contextlib.nested(
                mock.patch.object(objects.Flavor, 'get_by_id'),
                mock.patch.object(objects.Instance, 'save'),
        ) as (mock_flavor, mock_instance):
            flavor = instance_ref.get_flavor()
            flavor.extra_specs = {}
            mock_flavor.return_value = flavor

            self.assertRaises(exception.RequestedVRamTooHigh,
                              conn._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
+
    def test_video_driver_ram_above_flavor_limit(self):
        """A hw_video_ram request (64) above the flavor's
        hw_video:ram_max_mb cap (50) raises RequestedVRamTooHigh.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        instance_ref = objects.Instance(**self.test_instance)
        instance_type = instance_ref.get_flavor()
        instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_video_model": "qxl",
                                     "hw_video_ram": "64"}}
        with contextlib.nested(
                mock.patch.object(objects.Flavor, 'get_by_id',
                                  return_value=instance_type),
                mock.patch.object(objects.Instance, 'save')):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              conn._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_without_qga_through_image_meta(self,
                                                             mock_flavor):
        """hw_qemu_guest_agent="no" leaves the guest agent channel out
        (8 devices, no LibvirtConfigGuestChannel).
        """
        self.flags(virt_type='kvm', group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
        cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_get_guest_config_with_rng_device(self, mock_flavor):
        """With hw_rng:allowed in the flavor and hw_rng_model="virtio" in
        image meta, a 'random'-model RNG device is added with no backend
        path or rate limits.
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {'hw_rng:allowed': 'True'}
        mock_flavor.return_value = flavor

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        image_meta = {"properties": {"hw_rng_model": "virtio"}}

        cfg = conn._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_not_allowed(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 7)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_limits(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True',
+ 'hw_rng:rate_bytes': '1024',
+ 'hw_rng:rate_period': '2'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertIsNone(cfg.devices[6].backend)
+ self.assertEqual(cfg.devices[6].rate_bytes, 1024)
+ self.assertEqual(cfg.devices[6].rate_period, 2)
+
+ @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_backend(self, mock_flavor, mock_path):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ rng_dev_path='/dev/hw_rng',
+ group='libvirt')
+ mock_path.return_value = True
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
+ self.assertIsNone(cfg.devices[6].rate_bytes)
+ self.assertIsNone(cfg.devices[6].rate_period)
+
+ @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_dev_not_present(self, mock_flavor,
+ mock_path):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ rng_dev_path='/dev/hw_rng',
+ group='libvirt')
+ mock_path.return_value = False
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ self.assertRaises(exception.RngDeviceNotExist,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_cpu_quota(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'quota:cpu_shares': '10000',
+ 'quota:cpu_period': '20000'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(10000, cfg.cputune.shares)
+ self.assertEqual(20000, cfg.cputune.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_bogus_cpu_quota(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
+ 'quota:cpu_period': '20000'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.assertRaises(ValueError,
+ conn._get_guest_config,
+ instance_ref, [], {}, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_sysinfo_serial(self, expected_serial,
+ mock_flavor):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = drvr._get_guest_config_sysinfo(instance_ref)
+
+ self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
+ self.assertEqual(version.vendor_string(),
+ cfg.system_manufacturer)
+ self.assertEqual(version.product_string(),
+ cfg.system_product)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.system_version)
+ self.assertEqual(expected_serial,
+ cfg.system_serial)
+ self.assertEqual(instance_ref['uuid'],
+ cfg.system_uuid)
+
+ def test_get_guest_config_sysinfo_serial_none(self):
+ self.flags(sysinfo_serial="none", group="libvirt")
+ self._test_get_guest_config_sysinfo_serial(None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_uuid")
+ def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_os(self):
+ self.flags(sysinfo_serial="os", group="libvirt")
+
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_open, ):
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_hardware(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_get_host_uuid")
+ ) as (mock_exists, mock_uuid):
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return False
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_os(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_exists, mock_open):
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return True
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_invalid(self):
+ self.flags(sysinfo_serial="invalid", group="libvirt")
+
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.LibvirtDriver,
+ fake.FakeVirtAPI(),
+ True)
+
+ def _create_fake_service_compute(self):
+ service_info = {
+ 'id': 1729,
+ 'host': 'fake',
+ 'report_count': 0
+ }
+ service_ref = objects.Service(**service_info)
+
+ compute_info = {
+ 'id': 1729,
+ 'vcpus': 2,
+ 'memory_mb': 1024,
+ 'local_gb': 2048,
+ 'vcpus_used': 0,
+ 'memory_mb_used': 0,
+ 'local_gb_used': 0,
+ 'free_ram_mb': 1024,
+ 'free_disk_gb': 2048,
+ 'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'running_vms': 0,
+ 'cpu_info': '',
+ 'current_workload': 0,
+ 'service_id': service_ref['id']
+ }
+ compute_ref = objects.ComputeNode(**compute_info)
+ return (service_ref, compute_ref)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_pci_passthrough_kvm(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+ service_ref, compute_ref = self._create_fake_service_compute()
+
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ pci_device_info = dict(test_pci_device.fake_db_dev)
+ pci_device_info.update(compute_node_id=1,
+ label='fake',
+ status='allocated',
+ address='0000:00:00.1',
+ compute_id=compute_ref['id'],
+ instance_uuid=instance.uuid,
+ request_id=None,
+ extra_info={})
+ pci_device = objects.PciDevice(**pci_device_info)
+ pci_list = objects.PciDeviceList()
+ pci_list.objects.append(pci_device)
+ instance.pci_devices = pci_list
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance)
+ cfg = conn._get_guest_config(instance, [], {}, disk_info)
+
+ had_pci = 0
+ # care only about the PCI devices
+ for dev in cfg.devices:
+ if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
+ had_pci += 1
+ self.assertEqual(dev.type, 'pci')
+ self.assertEqual(dev.managed, 'yes')
+ self.assertEqual(dev.mode, 'subsystem')
+
+ self.assertEqual(dev.domain, "0000")
+ self.assertEqual(dev.bus, "00")
+ self.assertEqual(dev.slot, "00")
+ self.assertEqual(dev.function, "1")
+ self.assertEqual(had_pci, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_pci_passthrough_xen(self, mock_flavor):
+ self.flags(virt_type='xen', group='libvirt')
+ service_ref, compute_ref = self._create_fake_service_compute()
+
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ pci_device_info = dict(test_pci_device.fake_db_dev)
+ pci_device_info.update(compute_node_id=1,
+ label='fake',
+ status='allocated',
+ address='0000:00:00.2',
+ compute_id=compute_ref['id'],
+ instance_uuid=instance.uuid,
+ request_id=None,
+ extra_info={})
+ pci_device = objects.PciDevice(**pci_device_info)
+ pci_list = objects.PciDeviceList()
+ pci_list.objects.append(pci_device)
+ instance.pci_devices = pci_list
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance)
+ cfg = conn._get_guest_config(instance, [], {}, disk_info)
+ had_pci = 0
+ # care only about the PCI devices
+ for dev in cfg.devices:
+ if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
+ had_pci += 1
+ self.assertEqual(dev.type, 'pci')
+ self.assertEqual(dev.managed, 'no')
+ self.assertEqual(dev.mode, 'subsystem')
+
+ self.assertEqual(dev.domain, "0000")
+ self.assertEqual(dev.bus, "00")
+ self.assertEqual(dev.slot, "00")
+ self.assertEqual(dev.function, "2")
+ self.assertEqual(had_pci, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_line_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ self.test_instance['kernel_id'] = "fake_kernel_id"
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"os_command_line":
+ "fake_os_command_line"}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_line_without_kernel_id(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"os_command_line":
+ "fake_os_command_line"}}
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertIsNone(cfg.os_cmdline)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_empty(self, mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ self.test_instance['kernel_id'] = "fake_kernel_id"
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
+ # default, so testing an empty string and None value in the
+ # os_command_line image property must pass
+ image_meta = {"properties": {"os_command_line": ""}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertNotEqual(cfg.os_cmdline, "")
+
+ image_meta = {"properties": {"os_command_line": None}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertIsNotNone(cfg.os_cmdline)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_armv7(self, mock_flavor):
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.ARMV7
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ return caps
+
+ self.flags(virt_type="kvm",
+ group="libvirt")
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ "_get_host_capabilities",
+ get_host_capabilities_stub)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "vexpress-a15")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_aarch64(self, mock_flavor):
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.AARCH64
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ return caps
+
+ self.flags(virt_type="kvm",
+ group="libvirt")
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ "_get_host_capabilities",
+ get_host_capabilities_stub)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "virt")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_machine_type_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_machine_type":
+ "fake_machine_type"}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_machine_type_from_config(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(hw_machine_type=['x86_64=fake_machine_type'],
+ group='libvirt')
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='xtpr'/>
+ </cpu>
+ """
+
+ # Make sure the host arch is mocked as x86_64
+ self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
+ baselineCPU=fake_baselineCPU,
+ getVersion=lambda: 1005001)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_ppc64(self, device_index, mock_flavor):
+ """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
+ """
+ self.flags(virt_type='kvm', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {}
+ expected = (arch.PPC64, arch.PPC)
+ for guestarch in expected:
+ with mock.patch.object(libvirt_driver.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta,
+ disk_info)
+ self.assertIsInstance(cfg.devices[device_index],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertEqual(cfg.devices[device_index].type, 'vga')
+
+ def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
+ self.flags(vnc_enabled=True)
+ self._test_get_guest_config_ppc64(6)
+
+ def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+ self._test_get_guest_config_ppc64(8)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_none(self, mock_flavor):
+ self.flags(cpu_mode="none", group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertIsNone(conf.cpu.mode)
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_kvm(self, mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ def get_lib_version_stub():
+ return (0 * 1000 * 1000) + (9 * 1000) + 11
+
+ self.stubs.Set(self.conn,
+ "getLibVersion",
+ get_lib_version_stub)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_uml(self, mock_flavor):
+ self.flags(virt_type="uml",
+ cpu_mode=None,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsNone(conf.cpu)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_lxc(self, mock_flavor):
+ self.flags(virt_type="lxc",
+ cpu_mode=None,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsNone(conf.cpu)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_host_passthrough(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="host-passthrough", group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-passthrough")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_host_model(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="host-model", group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_custom(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="custom",
+ cpu_model="Penryn",
+ group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "custom")
+ self.assertEqual(conf.cpu.model, "Penryn")
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_topology(self, mock_flavor):
+ fake_flavor = objects.flavor.Flavor.get_by_id(
+ self.context,
+ self.test_instance['instance_type_id'])
+ fake_flavor.vcpus = 8
+ fake_flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
+ return_value=fake_flavor):
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertEqual(conf.cpu.sockets, 4)
+ self.assertEqual(conf.cpu.cores, 2)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_by_default(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_disable(self, mock_flavor):
+ self.flags(mem_stats_period_seconds=0, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ no_exist = True
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ no_exist = False
+ break
+ self.assertTrue(no_exist)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_period_value(self, mock_flavor):
+ self.flags(mem_stats_period_seconds=21, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(21, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_qemu(self, mock_flavor):
+ self.flags(virt_type='qemu', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_xen(self, mock_flavor):
+ self.flags(virt_type='xen', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('xen', device.model)
+ self.assertEqual(10, device.period)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def test_get_guest_memory_balloon_config_lxc(self, mock_flavor):
+        # Containers (lxc) get no memory balloon device regardless of
+        # the stats-period configuration.
+        self.flags(virt_type='lxc', group='libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        instance_ref = objects.Instance(**self.test_instance)
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+        # Scan every generated device; finding a memballoon is a failure.
+        no_exist = True
+        for device in cfg.devices:
+            if device.root_name == 'memballoon':
+                no_exist = False
+                break
+        self.assertTrue(no_exist)
+
+    # The tests below delegate to the _check_xml_and_* helpers to
+    # verify generated domain XML / connection URIs across the various
+    # kernel/ramdisk, rescue, container and disk-prefix combinations.
+    def test_xml_and_uri_no_ramdisk_no_kernel(self):
+        instance_data = dict(self.test_instance)
+        self._check_xml_and_uri(instance_data,
+                                expect_kernel=False, expect_ramdisk=False)
+
+    def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
+        instance_data = dict(self.test_instance)
+        instance_data.update({'vm_mode': vm_mode.HVM})
+        self._check_xml_and_uri(instance_data, expect_kernel=False,
+                                expect_ramdisk=False, expect_xen_hvm=True)
+
+    def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
+        instance_data = dict(self.test_instance)
+        instance_data.update({'vm_mode': vm_mode.XEN})
+        self._check_xml_and_uri(instance_data, expect_kernel=False,
+                                expect_ramdisk=False, expect_xen_hvm=False,
+                                xen_only=True)
+
+    def test_xml_and_uri_no_ramdisk(self):
+        instance_data = dict(self.test_instance)
+        instance_data['kernel_id'] = 'aki-deadbeef'
+        self._check_xml_and_uri(instance_data,
+                                expect_kernel=True, expect_ramdisk=False)
+
+    def test_xml_and_uri_no_kernel(self):
+        instance_data = dict(self.test_instance)
+        # A ramdisk without a kernel is not usable, hence
+        # expect_ramdisk=False even though a ramdisk_id is set.
+        instance_data['ramdisk_id'] = 'ari-deadbeef'
+        self._check_xml_and_uri(instance_data,
+                                expect_kernel=False, expect_ramdisk=False)
+
+    def test_xml_and_uri(self):
+        instance_data = dict(self.test_instance)
+        instance_data['ramdisk_id'] = 'ari-deadbeef'
+        instance_data['kernel_id'] = 'aki-deadbeef'
+        self._check_xml_and_uri(instance_data,
+                                expect_kernel=True, expect_ramdisk=True)
+
+    def test_xml_and_uri_rescue(self):
+        instance_data = dict(self.test_instance)
+        instance_data['ramdisk_id'] = 'ari-deadbeef'
+        instance_data['kernel_id'] = 'aki-deadbeef'
+        self._check_xml_and_uri(instance_data, expect_kernel=True,
+                                expect_ramdisk=True, rescue=instance_data)
+
+    def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
+        instance_data = dict(self.test_instance)
+        self._check_xml_and_uri(instance_data, expect_kernel=False,
+                                expect_ramdisk=False, rescue=instance_data)
+
+    def test_xml_and_uri_rescue_no_kernel(self):
+        instance_data = dict(self.test_instance)
+        # NOTE(review): the ramdisk id uses an 'aki-' (kernel) prefix;
+        # presumably a typo for 'ari-' -- harmless to the assertion.
+        instance_data['ramdisk_id'] = 'aki-deadbeef'
+        self._check_xml_and_uri(instance_data, expect_kernel=False,
+                                expect_ramdisk=True, rescue=instance_data)
+
+    def test_xml_and_uri_rescue_no_ramdisk(self):
+        instance_data = dict(self.test_instance)
+        instance_data['kernel_id'] = 'aki-deadbeef'
+        self._check_xml_and_uri(instance_data, expect_kernel=True,
+                                expect_ramdisk=False, rescue=instance_data)
+
+    def test_xml_uuid(self):
+        self._check_xml_and_uuid({"disk_format": "raw"})
+
+    def test_lxc_container_and_uri(self):
+        instance_data = dict(self.test_instance)
+        self._check_xml_and_container(instance_data)
+
+    def test_xml_disk_prefix(self):
+        instance_data = dict(self.test_instance)
+        self._check_xml_and_disk_prefix(instance_data, None)
+
+    def test_xml_user_specified_disk_prefix(self):
+        instance_data = dict(self.test_instance)
+        self._check_xml_and_disk_prefix(instance_data, 'sd')
+
+    def test_xml_disk_driver(self):
+        instance_data = dict(self.test_instance)
+        self._check_xml_and_disk_driver(instance_data)
+
+    def test_xml_disk_bus_virtio(self):
+        # Plain raw image: a single virtio disk named vda is expected.
+        self._check_xml_and_disk_bus({"disk_format": "raw"},
+                                     None,
+                                     (("disk", "virtio", "vda"),))
+
+    def test_xml_disk_bus_ide(self):
+        # It's necessary to check if the architecture is power, because
+        # power doesn't have support to ide, and so libvirt translate
+        # all ide calls to scsi
+
+        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
+                    arch.PPC64: ("cdrom", "scsi", "sda")}
+
+        # Non-power architectures fall back to the ide/hda default.
+        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
+                                 ("cdrom", "ide", "hda"))
+        self._check_xml_and_disk_bus({"disk_format": "iso"},
+                                     None,
+                                     (expec_val,))
+
+    def test_xml_disk_bus_ide_and_virtio(self):
+        # It's necessary to check if the architecture is power, because
+        # power doesn't have support to ide, and so libvirt translate
+        # all ide calls to scsi
+
+        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
+                    arch.PPC64: ("cdrom", "scsi", "sda")}
+
+        # Attach a swap device and one ephemeral disk alongside the
+        # iso root to verify mixed ide/virtio bus assignment.
+        swap = {'device_name': '/dev/vdc',
+                'swap_size': 1}
+        ephemerals = [{'device_type': 'disk',
+                       'disk_bus': 'virtio',
+                       'device_name': '/dev/vdb',
+                       'size': 1}]
+        block_device_info = {
+                'swap': swap,
+                'ephemerals': ephemerals}
+        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
+                                 ("cdrom", "ide", "hda"))
+        self._check_xml_and_disk_bus({"disk_format": "iso"},
+                                     block_device_info,
+                                     (expec_val,
+                                      ("disk", "virtio", "vdb"),
+                                      ("disk", "virtio", "vdc")))
+
+    def test_list_instance_domains_fast(self):
+        # Exercises the listAllDomains()-based fast path: flags select
+        # active-only vs active+inactive domains.
+        if not hasattr(libvirt, "VIR_CONNECT_LIST_DOMAINS_ACTIVE"):
+            self.skipTest("libvirt missing VIR_CONNECT_LIST_DOMAINS_ACTIVE")
+
+        vm1 = FakeVirtDomain(id=3, name="instance00000001")
+        vm2 = FakeVirtDomain(id=17, name="instance00000002")
+        # vm3/vm4 have no id, i.e. they are defined but not running.
+        vm3 = FakeVirtDomain(name="instance00000003")
+        vm4 = FakeVirtDomain(name="instance00000004")
+
+        def fake_list_all(flags):
+            # Mimic libvirt's flag-filtered listing.
+            vms = []
+            if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
+                vms.extend([vm1, vm2])
+            if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
+                vms.extend([vm3, vm4])
+            return vms
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
+
+        self.mox.ReplayAll()
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        # Default: only running domains.
+        doms = drvr._list_instance_domains_fast()
+        self.assertEqual(len(doms), 2)
+        self.assertEqual(doms[0].name(), vm1.name())
+        self.assertEqual(doms[1].name(), vm2.name())
+
+        # only_running=False: inactive domains are included too.
+        doms = drvr._list_instance_domains_fast(only_running=False)
+        self.assertEqual(len(doms), 4)
+        self.assertEqual(doms[0].name(), vm1.name())
+        self.assertEqual(doms[1].name(), vm2.name())
+        self.assertEqual(doms[2].name(), vm3.name())
+        self.assertEqual(doms[3].name(), vm4.name())
+
+    def test_list_instance_domains_slow(self):
+        # Exercises the legacy slow path (listDomainsID +
+        # listDefinedDomains with per-domain lookups), including the
+        # races where a listed domain vanishes before lookup or appears
+        # in both the running and defined lists.
+        vm1 = FakeVirtDomain(id=3, name="instance00000001")
+        vm2 = FakeVirtDomain(id=17, name="instance00000002")
+        vm3 = FakeVirtDomain(name="instance00000003")
+        vm4 = FakeVirtDomain(name="instance00000004")
+        vms = [vm1, vm2, vm3, vm4]
+
+        def fake_lookup_id(id):
+            # Raise VIR_ERR_NO_DOMAIN for unknown ids, as libvirt would.
+            for vm in vms:
+                if vm.ID() == id:
+                    return vm
+            ex = fakelibvirt.make_libvirtError(
+                libvirt.libvirtError,
+                "No such domain",
+                error_code=libvirt.VIR_ERR_NO_DOMAIN)
+            raise ex
+
+        def fake_lookup_name(name):
+            # Raise VIR_ERR_NO_DOMAIN for unknown names, as libvirt would.
+            for vm in vms:
+                if vm.name() == name:
+                    return vm
+            ex = fakelibvirt.make_libvirtError(
+                libvirt.libvirtError,
+                "No such domain",
+                error_code=libvirt.VIR_ERR_NO_DOMAIN)
+            raise ex
+
+        def fake_list_doms():
+            # Include one ID that no longer exists
+            return [vm1.ID(), vm2.ID(), 666]
+
+        def fake_list_ddoms():
+            # Include one name that no longer exists and
+            # one dup from running list to show race in
+            # transition from inactive -> running
+            return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
+        libvirt_driver.LibvirtDriver._conn.listDefinedDomains = fake_list_ddoms
+        libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
+        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+        libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
+        libvirt_driver.LibvirtDriver._conn.numOfDefinedDomains = lambda: 2
+
+        self.mox.ReplayAll()
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        # Default: only the two running domains, vanished id dropped.
+        doms = drvr._list_instance_domains_slow()
+        self.assertEqual(len(doms), 2)
+        self.assertEqual(doms[0].name(), vm1.name())
+        self.assertEqual(doms[1].name(), vm2.name())
+
+        # Full listing: duplicates and vanished names de-duped/dropped.
+        doms = drvr._list_instance_domains_slow(only_running=False)
+        self.assertEqual(len(doms), 4)
+        self.assertEqual(doms[0].name(), vm1.name())
+        self.assertEqual(doms[1].name(), vm2.name())
+        self.assertEqual(doms[2].name(), vm3.name())
+        self.assertEqual(doms[3].name(), vm4.name())
+
+    def test_list_instance_domains_fallback_no_support(self):
+        # When listAllDomains() raises VIR_ERR_NO_SUPPORT the driver
+        # must transparently fall back to the slow listing path.
+        vm1 = FakeVirtDomain(id=3, name="instance00000001")
+        vm2 = FakeVirtDomain(id=17, name="instance00000002")
+        vms = [vm1, vm2]
+
+        def fake_lookup_id(id):
+            for vm in vms:
+                if vm.ID() == id:
+                    return vm
+            ex = fakelibvirt.make_libvirtError(
+                libvirt.libvirtError,
+                "No such domain",
+                error_code=libvirt.VIR_ERR_NO_DOMAIN)
+            raise ex
+
+        def fake_list_doms():
+            return [vm1.ID(), vm2.ID()]
+
+        def fake_list_all(flags):
+            # Simulate an older libvirt without the list-all API.
+            ex = fakelibvirt.make_libvirtError(
+                libvirt.libvirtError,
+                "API is not supported",
+                error_code=libvirt.VIR_ERR_NO_SUPPORT)
+            raise ex
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
+        libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
+        libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
+        libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
+
+        self.mox.ReplayAll()
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        doms = drvr._list_instance_domains()
+        self.assertEqual(len(doms), 2)
+        # NOTE(review): compares the raw `id` attribute rather than the
+        # ID() accessor used elsewhere; relies on FakeVirtDomain
+        # exposing it -- confirm against the fake's definition.
+        self.assertEqual(doms[0].id, vm1.id)
+        self.assertEqual(doms[1].id, vm2.id)
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       "_list_instance_domains_fast")
+    def test_list_instance_domains_filtering(self, mock_list):
+        # _list_instance_domains must filter out non-guest domains
+        # (Xen dom-0) unless only_guests=False is passed.
+        vm0 = FakeVirtDomain(id=0, name="Domain-0")  # Xen dom-0
+        vm1 = FakeVirtDomain(id=3, name="instance00000001")
+        vm2 = FakeVirtDomain(id=17, name="instance00000002")
+        vm3 = FakeVirtDomain(name="instance00000003")
+        vm4 = FakeVirtDomain(name="instance00000004")
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        # Default: dom-0 is excluded from the result.
+        mock_list.return_value = [vm0, vm1, vm2]
+        doms = drvr._list_instance_domains()
+        self.assertEqual(len(doms), 2)
+        self.assertEqual(doms[0].name(), vm1.name())
+        self.assertEqual(doms[1].name(), vm2.name())
+        mock_list.assert_called_with(True)
+
+        # only_running=False is forwarded to the fast lister.
+        mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
+        doms = drvr._list_instance_domains(only_running=False)
+        self.assertEqual(len(doms), 4)
+        self.assertEqual(doms[0].name(), vm1.name())
+        self.assertEqual(doms[1].name(), vm2.name())
+        self.assertEqual(doms[2].name(), vm3.name())
+        self.assertEqual(doms[3].name(), vm4.name())
+        mock_list.assert_called_with(False)
+
+        # only_guests=False keeps dom-0 in the result.
+        mock_list.return_value = [vm0, vm1, vm2]
+        doms = drvr._list_instance_domains(only_guests=False)
+        self.assertEqual(len(doms), 3)
+        self.assertEqual(doms[0].name(), vm0.name())
+        self.assertEqual(doms[1].name(), vm1.name())
+        self.assertEqual(doms[2].name(), vm2.name())
+        mock_list.assert_called_with(True)
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       "_list_instance_domains")
+    def test_list_instances(self, mock_list):
+        # list_instances must return the domain names of all guests,
+        # running or not (only_running=False).
+        vm1 = FakeVirtDomain(id=3, name="instance00000001")
+        vm2 = FakeVirtDomain(id=17, name="instance00000002")
+        vm3 = FakeVirtDomain(name="instance00000003")
+        vm4 = FakeVirtDomain(name="instance00000004")
+
+        mock_list.return_value = [vm1, vm2, vm3, vm4]
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        names = drvr.list_instances()
+        self.assertEqual(names[0], vm1.name())
+        self.assertEqual(names[1], vm2.name())
+        self.assertEqual(names[2], vm3.name())
+        self.assertEqual(names[3], vm4.name())
+        mock_list.assert_called_with(only_running=False)
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       "_list_instance_domains")
+    def test_list_instance_uuids(self, mock_list):
+        # list_instance_uuids must return the UUID string of every
+        # guest domain, running or not (only_running=False).
+        vm1 = FakeVirtDomain(id=3, name="instance00000001")
+        vm2 = FakeVirtDomain(id=17, name="instance00000002")
+        vm3 = FakeVirtDomain(name="instance00000003")
+        vm4 = FakeVirtDomain(name="instance00000004")
+
+        mock_list.return_value = [vm1, vm2, vm3, vm4]
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        uuids = drvr.list_instance_uuids()
+        self.assertEqual(len(uuids), 4)
+        self.assertEqual(uuids[0], vm1.UUIDString())
+        self.assertEqual(uuids[1], vm2.UUIDString())
+        self.assertEqual(uuids[2], vm3.UUIDString())
+        self.assertEqual(uuids[3], vm4.UUIDString())
+        mock_list.assert_called_with(only_running=False)
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       "_list_instance_domains")
+    def test_get_all_block_devices(self, mock_list):
+        # _get_all_block_devices must collect only the <disk
+        # type='block'> source devices across all domains, skipping
+        # file-backed disks.
+        xml = [
+            """
+                <domain type='kvm'>
+                    <devices>
+                        <disk type='file'>
+                            <source file='filename'/>
+                        </disk>
+                        <disk type='block'>
+                            <source dev='/path/to/dev/1'/>
+                        </disk>
+                    </devices>
+                </domain>
+            """,
+            """
+                <domain type='kvm'>
+                    <devices>
+                        <disk type='file'>
+                            <source file='filename'/>
+                        </disk>
+                    </devices>
+                </domain>
+            """,
+            """
+                <domain type='kvm'>
+                    <devices>
+                        <disk type='file'>
+                            <source file='filename'/>
+                        </disk>
+                        <disk type='block'>
+                            <source dev='/path/to/dev/3'/>
+                        </disk>
+                    </devices>
+                </domain>
+            """,
+        ]
+
+        mock_list.return_value = [
+            FakeVirtDomain(xml[0], id=3, name="instance00000001"),
+            FakeVirtDomain(xml[1], id=1, name="instance00000002"),
+            FakeVirtDomain(xml[2], id=5, name="instance00000003")]
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        devices = drvr._get_all_block_devices()
+        self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
+        mock_list.assert_called_with()
+
+    def test_snapshot_in_ami_format(self):
+        # Snapshotting an instance whose base image is ami-formatted
+        # must produce an 'ami' snapshot and drive the expected
+        # task-state transitions.
+        expected_calls = [
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_UPLOADING,
+                    'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./', group='libvirt')
+
+        # Assign different image_ref from nova/images/fakes for testing ami
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        # NOTE(review): the `context` module (not self.context) is passed
+        # here; the fake image service appears to ignore it -- confirm
+        # intentional.
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['disk_format'], 'ami')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_lxc_snapshot_in_ami_format(self):
+        # Same as test_snapshot_in_ami_format but with the lxc
+        # virt_type, which must not change the snapshot format.
+        expected_calls = [
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_UPLOADING,
+                    'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   virt_type='lxc',
+                   group='libvirt')
+
+        # Assign different image_ref from nova/images/fakes for testing ami
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['disk_format'], 'ami')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_snapshot_in_raw_format(self):
+        # A raw-backed instance must produce a raw-format snapshot; the
+        # image conversion step is stubbed to only register the dest file.
+        expected_calls = [
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_UPLOADING,
+                    'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./', group='libvirt')
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
+
+        def convert_image(source, dest, out_format):
+            # Pretend the conversion happened by registering the dest.
+            libvirt_driver.libvirt_utils.files[dest] = ''
+
+        self.stubs.Set(images, 'convert_image', convert_image)
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['disk_format'], 'raw')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_lvm_snapshot_in_raw_format(self):
+        # Tests Lvm backend snapshot functionality with raw format
+        # snapshots.
+        xml = """
+              <domain type='kvm'>
+                   <devices>
+                       <disk type='block' device='disk'>
+                           <source dev='/dev/some-vg/some-lv'/>
+                       </disk>
+                   </devices>
+              </domain>
+              """
+        update_task_state_calls = [
+            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
+            mock.call(task_state=task_states.IMAGE_UPLOADING,
+                      expected_state=task_states.IMAGE_PENDING_UPLOAD)]
+        mock_update_task_state = mock.Mock()
+        mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
+                                      autospec=True)
+        volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
+        mock_volume_info = mock.Mock(return_value=volume_info,
+                                             autospec=True)
+        mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
+        mock_convert_image = mock.Mock()
+
+        def convert_image_side_effect(source, dest, out_format,
+                                      run_as_root=True):
+            # Pretend the conversion happened by registering the dest.
+            libvirt_driver.libvirt_utils.files[dest] = ''
+        mock_convert_image.side_effect = convert_image_side_effect
+
+        self.flags(snapshots_directory='./',
+                   snapshot_image_format='raw',
+                   images_type='lvm',
+                   images_volume_group='nova-vg', group='libvirt')
+        # NOTE(review): module-level attribute is mutated directly and
+        # only restored at the end of the test body; a failure before
+        # the restore leaks "lvm" into later tests -- consider
+        # addCleanup/try-finally.
+        libvirt_driver.libvirt_utils.disk_type = "lvm"
+
+        # Start test
+        image_service = nova.tests.unit.image.fake.FakeImageService()
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        recv_meta = image_service.create(context, sent_meta)
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        with contextlib.nested(
+                mock.patch.object(libvirt_driver.LibvirtDriver,
+                                  '_conn',
+                                  autospec=True),
+                mock.patch.object(libvirt_driver.imagebackend.lvm,
+                                  'volume_info',
+                                  mock_volume_info),
+                mock.patch.object(libvirt_driver.imagebackend.images,
+                                  'convert_image',
+                                  mock_convert_image),
+                mock.patch.object(libvirt_driver.LibvirtDriver,
+                                  '_lookup_by_name',
+                                  mock_lookupByName)):
+            conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                          mock_update_task_state)
+
+        mock_lookupByName.assert_called_once_with("instance-00000001")
+        mock_volume_info.assert_has_calls(mock_volume_info_calls)
+        mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
+                                                   mock.ANY,
+                                                   'raw',
+                                                   run_as_root=True)
+        snapshot = image_service.show(context, recv_meta['id'])
+        mock_update_task_state.assert_has_calls(update_task_state_calls)
+        self.assertEqual('available', snapshot['properties']['image_state'])
+        self.assertEqual('active', snapshot['status'])
+        self.assertEqual('raw', snapshot['disk_format'])
+        self.assertEqual(snapshot_name, snapshot['name'])
+        # This is for all the subsequent tests that do not set the value of
+        # images type
+        self.flags(images_type='default', group='libvirt')
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+    def test_lxc_snapshot_in_raw_format(self):
+        # Raw snapshot with the lxc virt_type; format must still be raw.
+        expected_calls = [
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_UPLOADING,
+                    'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   virt_type='lxc',
+                   group='libvirt')
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
+        # NOTE(review): the direct assignment below is redundant with
+        # the stubs.Set above and bypasses the stub's auto-restore.
+        libvirt_driver.libvirt_utils.disk_type = "raw"
+
+        def convert_image(source, dest, out_format):
+            # Pretend the conversion happened by registering the dest.
+            libvirt_driver.libvirt_utils.files[dest] = ''
+
+        self.stubs.Set(images, 'convert_image', convert_image)
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['disk_format'], 'raw')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_snapshot_in_qcow2_format(self):
+        # snapshot_image_format=qcow2 must force a qcow2 snapshot.
+        expected_calls = [
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_UPLOADING,
+                    'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshot_image_format='qcow2',
+                   snapshots_directory='./',
+                   group='libvirt')
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['disk_format'], 'qcow2')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_lxc_snapshot_in_qcow2_format(self):
+        # qcow2 snapshot with the lxc virt_type; format must still be
+        # qcow2.
+        expected_calls = [
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+              {'args': (),
+               'kwargs':
+                   {'task_state': task_states.IMAGE_UPLOADING,
+                    'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshot_image_format='qcow2',
+                   snapshots_directory='./',
+                   virt_type='lxc',
+                   group='libvirt')
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['disk_format'], 'qcow2')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_lvm_snapshot_in_qcow2_format(self):
+        # Tests Lvm backend snapshot functionality with qcow2 format
+        # snapshots (comment previously said "raw" -- copy/paste).
+        xml = """
+              <domain type='kvm'>
+                   <devices>
+                       <disk type='block' device='disk'>
+                           <source dev='/dev/some-vg/some-lv'/>
+                       </disk>
+                   </devices>
+              </domain>
+              """
+        update_task_state_calls = [
+            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
+            mock.call(task_state=task_states.IMAGE_UPLOADING,
+                      expected_state=task_states.IMAGE_PENDING_UPLOAD)]
+        mock_update_task_state = mock.Mock()
+        mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
+                                      autospec=True)
+        volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
+        mock_volume_info = mock.Mock(return_value=volume_info, autospec=True)
+        mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
+        mock_convert_image = mock.Mock()
+
+        def convert_image_side_effect(source, dest, out_format,
+                                      run_as_root=True):
+            # Pretend the conversion happened by registering the dest.
+            libvirt_driver.libvirt_utils.files[dest] = ''
+        mock_convert_image.side_effect = convert_image_side_effect
+
+        self.flags(snapshots_directory='./',
+                   snapshot_image_format='qcow2',
+                   images_type='lvm',
+                   images_volume_group='nova-vg', group='libvirt')
+        libvirt_driver.libvirt_utils.disk_type = "lvm"
+
+        # Start test
+        image_service = nova.tests.unit.image.fake.FakeImageService()
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        recv_meta = image_service.create(context, sent_meta)
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        with contextlib.nested(
+                mock.patch.object(libvirt_driver.LibvirtDriver,
+                                  '_conn',
+                                  autospec=True),
+                mock.patch.object(libvirt_driver.imagebackend.lvm,
+                                  'volume_info',
+                                  mock_volume_info),
+                mock.patch.object(libvirt_driver.imagebackend.images,
+                                  'convert_image',
+                                  mock_convert_image),
+                mock.patch.object(libvirt_driver.LibvirtDriver,
+                                  '_lookup_by_name',
+                                  mock_lookupByName)):
+            conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                          mock_update_task_state)
+
+        mock_lookupByName.assert_called_once_with("instance-00000001")
+        mock_volume_info.assert_has_calls(mock_volume_info_calls)
+        mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
+                                                   mock.ANY,
+                                                   'qcow2',
+                                                   run_as_root=True)
+        snapshot = image_service.show(context, recv_meta['id'])
+        mock_update_task_state.assert_has_calls(update_task_state_calls)
+        self.assertEqual('available', snapshot['properties']['image_state'])
+        self.assertEqual('active', snapshot['status'])
+        self.assertEqual('qcow2', snapshot['disk_format'])
+        self.assertEqual(snapshot_name, snapshot['name'])
+        # Restore defaults for subsequent tests that do not set images_type.
+        self.flags(images_type='default', group='libvirt')
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+    def test_snapshot_no_image_architecture(self):
+        """Snapshot an instance whose base image has no architecture
+        property and verify the resulting snapshot image.
+
+        Asserts the update_task_state() callback transitions
+        (IMAGE_PENDING_UPLOAD, then IMAGE_UPLOADING) and that the
+        snapshot ends up 'available'/'active' in the image service.
+        """
+        # Exact call sequence snapshot() must make on the task-state
+        # callback; verified later via FunctionCallMatcher.match().
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   group='libvirt')
+
+        # Assign different image_ref from nova/images/fakes for
+        # testing different base image
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        # Stub out the libvirt connection and command execution so no
+        # real hypervisor or shell is touched.
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        # match() returns None when the observed calls equal expected_calls.
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_lxc_snapshot_no_image_architecture(self):
+        """LXC variant of test_snapshot_no_image_architecture.
+
+        Runs the same snapshot flow with virt_type='lxc' and a qcow2
+        disk type, expecting identical task-state transitions and an
+        'available'/'active' snapshot.
+        """
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   virt_type='lxc',
+                   group='libvirt')
+
+        # Assign different image_ref from nova/images/fakes for
+        # testing different base image
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+
+        # Assuming that base image already exists in image_service
+        instance_ref = objects.Instance(**test_instance)
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+        # Force the fake libvirt_utils to report a qcow2 backing disk.
+        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_snapshot_no_original_image(self):
+        """Snapshot an instance whose image_ref points at a non-existent
+        base image; the snapshot must still complete and become
+        'available'/'active' with the usual task-state transitions.
+        """
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   group='libvirt')
+
+        # Assign a non-existent image
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+
+        instance_ref = objects.Instance(**test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        # Stub out the libvirt connection and command execution.
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ # Assign a non-existent image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+
+ instance_ref = objects.Instance(**test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_snapshot_metadata_image(self):
+        """Snapshot an instance whose base image defines an architecture
+        and extra properties; verify those properties (architecture,
+        key_a, key_b) are carried over onto the snapshot image.
+        """
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   group='libvirt')
+
+        # Assign an image with an architecture defined (x86_64)
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
+
+        instance_ref = objects.Instance(**test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+
+        # Extra custom properties that must survive the snapshot.
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id),
+                      'architecture': 'fake_arch',
+                      'key_a': 'value_a',
+                      'key_b': 'value_b'}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['properties']['architecture'], 'fake_arch')
+        self.assertEqual(snapshot['properties']['key_a'], 'value_a')
+        self.assertEqual(snapshot['properties']['key_b'], 'value_b')
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+    def test_snapshot_with_os_type(self):
+        """Snapshot an instance that has os_type set; verify the os_type
+        property is propagated onto the snapshot image.
+        """
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+        self.flags(snapshots_directory='./',
+                   group='libvirt')
+
+        # Assign a non-existent image
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+        test_instance["os_type"] = 'linux'
+
+        instance_ref = objects.Instance(**test_instance)
+        instance_ref.info_cache = objects.InstanceInfoCache(
+            network_info=None)
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id),
+                      'os_type': instance_ref['os_type']}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        recv_meta = self.image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+        libvirt_driver.utils.execute = self.fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        conn.snapshot(self.context, instance_ref, recv_meta['id'],
+                      func_call_matcher.call)
+
+        snapshot = self.image_service.show(context, recv_meta['id'])
+        self.assertIsNone(func_call_matcher.match())
+        self.assertEqual(snapshot['properties']['image_state'], 'available')
+        self.assertEqual(snapshot['properties']['os_type'],
+                         instance_ref['os_type'])
+        self.assertEqual(snapshot['status'], 'active')
+        self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test__create_snapshot_metadata(self):
+ base = {}
+ instance = {'kernel_id': 'kernel',
+ 'project_id': 'prj_id',
+ 'ramdisk_id': 'ram_id',
+ 'os_type': None}
+ img_fmt = 'raw'
+ snp_name = 'snapshot_name'
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
+ expected = {'is_public': False,
+ 'status': 'active',
+ 'name': snp_name,
+ 'properties': {
+ 'kernel_id': instance['kernel_id'],
+ 'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id'],
+ 'ramdisk_id': instance['ramdisk_id'],
+ },
+ 'disk_format': img_fmt,
+ 'container_format': base.get('container_format', 'bare')
+ }
+ self.assertEqual(ret, expected)
+
+ # simulate an instance with os_type field defined
+ # disk format equals to ami
+ # container format not equals to bare
+ instance['os_type'] = 'linux'
+ base['disk_format'] = 'ami'
+ base['container_format'] = 'test_container'
+ expected['properties']['os_type'] = instance['os_type']
+ expected['disk_format'] = base['disk_format']
+ expected['container_format'] = base.get('container_format', 'bare')
+ ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
+ self.assertEqual(ret, expected)
+
+ @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.'
+ 'connect_volume')
+ @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.get_config')
+ def test_get_volume_config(self, get_config, connect_volume):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ connection_info = {'driver_volume_type': 'fake',
+ 'data': {'device_path': '/fake',
+ 'access_mode': 'rw'}}
+ bdm = {'device_name': 'vdb',
+ 'disk_bus': 'fake-bus',
+ 'device_type': 'fake-type'}
+ disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
+ 'dev': 'vdb'}
+ mock_config = mock.MagicMock()
+
+ get_config.return_value = mock_config
+ config = conn._get_volume_config(connection_info, disk_info)
+ get_config.assert_called_once_with(connection_info, disk_info)
+ self.assertEqual(mock_config, config)
+
+ def test_attach_invalid_volume_type(self):
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.VolumeDriverNotFound,
+ conn.attach_volume, None,
+ {"driver_volume_type": "badtype"},
+ instance,
+ "/dev/sda")
+
+    def test_attach_blockio_invalid_hypervisor(self):
+        """attach_volume must raise InvalidHypervisorType when block I/O
+        tuning (logical/physical_block_size) is requested on a virt_type
+        that does not support it ('fake_type' here).
+        """
+        self.flags(virt_type='fake_type', group='libvirt')
+        self.create_fake_libvirt_mock()
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        instance = fake_instance.fake_instance_obj(
+            self.context, **self.test_instance)
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(exception.InvalidHypervisorType,
+                          conn.attach_volume, None,
+                          {"driver_volume_type": "fake",
+                           "data": {"logical_block_size": "4096",
+                                    "physical_block_size": "4096"}
+                          },
+                          instance,
+                          "/dev/sda")
+
+    def test_attach_blockio_invalid_version(self):
+        """attach_volume must raise Invalid when block I/O tuning is
+        requested but the libvirt version (stubbed to 0.9.8) is too old.
+        """
+        def get_lib_version_stub():
+            # Encoded libvirt version 0.9.8 (major*1e6 + minor*1e3 + micro).
+            return (0 * 1000 * 1000) + (9 * 1000) + 8
+        self.flags(virt_type='qemu', group='libvirt')
+        self.create_fake_libvirt_mock()
+        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+        instance = fake_instance.fake_instance_obj(
+            self.context, **self.test_instance)
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        # NOTE(review): the stub targets self.conn (the fake connection),
+        # not the freshly built driver -- presumably both resolve to the
+        # same underlying connection set up by create_fake_libvirt_mock();
+        # confirm against that helper.
+        self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
+        self.assertRaises(exception.Invalid,
+                          conn.attach_volume, None,
+                          {"driver_volume_type": "fake",
+                           "data": {"logical_block_size": "4096",
+                                    "physical_block_size": "4096"}
+                          },
+                          instance,
+                          "/dev/sda")
+
+    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+    def test_attach_volume_with_vir_domain_affect_live_flag(self,
+            mock_lookup_by_name, mock_get_info):
+        """attach_volume on a RUNNING or PAUSED domain must pass both
+        AFFECT_CONFIG and AFFECT_LIVE flags to attachDeviceFlags.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+        mock_dom = mock.MagicMock()
+        mock_lookup_by_name.return_value = mock_dom
+
+        connection_info = {"driver_volume_type": "fake",
+                           "data": {"device_path": "/fake",
+                                    "access_mode": "rw"}}
+        bdm = {'device_name': 'vdb',
+               'disk_bus': 'fake-bus',
+               'device_type': 'fake-type'}
+        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
+                     'dev': 'vdb'}
+        mock_get_info.return_value = disk_info
+        mock_conf = mock.MagicMock()
+        # Both flags are expected for a live (running/paused) domain.
+        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
+                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
+
+        with contextlib.nested(
+            mock.patch.object(conn, '_connect_volume'),
+            mock.patch.object(conn, '_get_volume_config',
+                              return_value=mock_conf),
+            mock.patch.object(conn, '_set_cache_mode')
+        ) as (mock_connect_volume, mock_get_volume_config,
+              mock_set_cache_mode):
+            # Exercise both power states that count as "live".
+            for state in (power_state.RUNNING, power_state.PAUSED):
+                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
+
+                conn.attach_volume(self.context, connection_info, instance,
+                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
+                                   device_type=bdm['device_type'])
+
+                mock_lookup_by_name.assert_called_with(instance['name'])
+                mock_get_info.assert_called_with(CONF.libvirt.virt_type, bdm)
+                mock_connect_volume.assert_called_with(
+                    connection_info, disk_info)
+                mock_get_volume_config.assert_called_with(
+                    connection_info, disk_info)
+                mock_set_cache_mode.assert_called_with(mock_conf)
+                mock_dom.attachDeviceFlags.assert_called_with(
+                    mock_conf.to_xml(), flags)
+
+    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_disk_xml')
+    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+    def test_detach_volume_with_vir_domain_affect_live_flag(self,
+            mock_lookup_by_name, mock_get_disk_xml):
+        """detach_volume on a RUNNING or PAUSED domain must pass both
+        AFFECT_CONFIG and AFFECT_LIVE flags to detachDeviceFlags and then
+        disconnect the volume.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+        mock_dom = mock.MagicMock()
+        # Disk XML the stubbed _get_disk_xml hands back for target vdc.
+        mock_xml = \
+            """
+            <disk type='file'>
+                <source file='/path/to/fake-volume'/>
+                <target dev='vdc' bus='virtio'/>
+            </disk>
+            """
+        mock_get_disk_xml.return_value = mock_xml
+
+        connection_info = {"driver_volume_type": "fake",
+                           "data": {"device_path": "/fake",
+                                    "access_mode": "rw"}}
+        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
+                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
+
+        with mock.patch.object(conn, '_disconnect_volume') as \
+                mock_disconnect_volume:
+            # Exercise both power states that count as "live".
+            for state in (power_state.RUNNING, power_state.PAUSED):
+                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
+                mock_lookup_by_name.return_value = mock_dom
+
+                conn.detach_volume(connection_info, instance, '/dev/vdc')
+
+                mock_lookup_by_name.assert_called_with(instance['name'])
+                mock_get_disk_xml.assert_called_with(mock_dom.XMLDesc(0),
+                                                     'vdc')
+                mock_dom.detachDeviceFlags.assert_called_with(mock_xml, flags)
+                mock_disconnect_volume.assert_called_with(
+                    connection_info, 'vdc')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_multi_nic(self, mock_flavor):
+ network_info = _fake_network_info(self.stubs, 2)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+ interfaces = tree.findall("./devices/interface")
+ self.assertEqual(len(interfaces), 2)
+ self.assertEqual(interfaces[0].get('type'), 'bridge')
+
+ def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
+ exc=ValueError()):
+ open_behavior = os.open(os.path.join('.', '.directio.test'),
+ os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
+ if raise_open:
+ open_behavior.AndRaise(exc)
+ else:
+ open_behavior.AndReturn(3)
+ write_bahavior = os.write(3, mox.IgnoreArg())
+ if raise_write:
+ write_bahavior.AndRaise(exc)
+ else:
+ os.close(3)
+ os.unlink(3)
+
+    def test_supports_direct_io(self):
+        """Exercise _supports_direct_io against five recorded scenarios:
+        success, generic failure on write, generic failure on open, and
+        EINVAL on write/open (EINVAL means "not supported" -> False,
+        other errors propagate).
+        """
+        # O_DIRECT is not supported on all Python runtimes, so on platforms
+        # where it's not supported (e.g. Mac), we can still test the code-path
+        # by stubbing out the value.
+        if not hasattr(os, 'O_DIRECT'):
+            # `mock` seems to have trouble stubbing an attr that doesn't
+            # originally exist, so falling back to stubbing out the attribute
+            # directly.
+            os.O_DIRECT = 16384
+            self.addCleanup(delattr, os, 'O_DIRECT')
+
+        einval = OSError()
+        einval.errno = errno.EINVAL
+        self.mox.StubOutWithMock(os, 'open')
+        self.mox.StubOutWithMock(os, 'write')
+        self.mox.StubOutWithMock(os, 'close')
+        self.mox.StubOutWithMock(os, 'unlink')
+        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io
+
+        # Record five expectation passes, consumed in order below.
+        self._behave_supports_direct_io()
+        self._behave_supports_direct_io(raise_write=True)
+        self._behave_supports_direct_io(raise_open=True)
+        self._behave_supports_direct_io(raise_write=True, exc=einval)
+        self._behave_supports_direct_io(raise_open=True, exc=einval)
+
+        self.mox.ReplayAll()
+        self.assertTrue(_supports_direct_io('.'))
+        self.assertRaises(ValueError, _supports_direct_io, '.')
+        self.assertRaises(ValueError, _supports_direct_io, '.')
+        self.assertFalse(_supports_direct_io('.'))
+        self.assertFalse(_supports_direct_io('.'))
+        self.mox.VerifyAll()
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_container(self, instance, mock_flavor):
+ instance_ref = objects.Instance(**instance)
+
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertEqual(conn.uri(), 'lxc:///')
+
+ network_info = _fake_network_info(self.stubs, 1)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+
+ check = [
+ (lambda t: t.find('.').get('type'), 'lxc'),
+ (lambda t: t.find('./os/type').text, 'exe'),
+ (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
+
+ for i, (check, expected_result) in enumerate(check):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
+
+ target = tree.find('./devices/filesystem/source').get('dir')
+ self.assertTrue(len(target) > 0)
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def _check_xml_and_disk_prefix(self, instance, prefix, mock_flavor):
+        """For each virt_type, build guest XML and verify the first disk
+        target device name uses the expected prefix: the configured
+        disk_prefix if *prefix* is given, else the hypervisor default
+        (vda for qemu/kvm, sda for xen, ubda for uml).
+        """
+        instance_ref = objects.Instance(**instance)
+
+        def _get_prefix(p, default):
+            # Configured prefix wins; fall back to the per-type default.
+            if p:
+                return p + 'a'
+            return default
+
+        type_disk_map = {
+            'qemu': [
+                (lambda t: t.find('.').get('type'), 'qemu'),
+                (lambda t: t.find('./devices/disk/target').get('dev'),
+                 _get_prefix(prefix, 'vda'))],
+            'xen': [
+                (lambda t: t.find('.').get('type'), 'xen'),
+                (lambda t: t.find('./devices/disk/target').get('dev'),
+                 _get_prefix(prefix, 'sda'))],
+            'kvm': [
+                (lambda t: t.find('.').get('type'), 'kvm'),
+                (lambda t: t.find('./devices/disk/target').get('dev'),
+                 _get_prefix(prefix, 'vda'))],
+            'uml': [
+                (lambda t: t.find('.').get('type'), 'uml'),
+                (lambda t: t.find('./devices/disk/target').get('dev'),
+                 _get_prefix(prefix, 'ubda'))]
+            }
+
+        for (virt_type, checks) in type_disk_map.iteritems():
+            self.flags(virt_type=virt_type, group='libvirt')
+            if prefix:
+                self.flags(disk_prefix=prefix, group='libvirt')
+            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+            network_info = _fake_network_info(self.stubs, 1)
+            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                                instance_ref)
+
+            flavor = instance_ref.get_flavor()
+            flavor.extra_specs = {}
+            mock_flavor.return_value = flavor
+            xml = conn._get_guest_xml(self.context, instance_ref,
+                                      network_info, disk_info)
+            tree = etree.fromstring(xml)
+
+            for i, (check, expected_result) in enumerate(checks):
+                self.assertEqual(check(tree),
+                                 expected_result,
+                                 '%s != %s failed check %d' %
+                                 (check(tree), expected_result, i))
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def _check_xml_and_disk_driver(self, image_meta, mock_flavor):
+        """Verify disk cache mode in guest XML: 'none' while O_DIRECT is
+        supported, 'writethrough' once direct I/O support is withdrawn.
+        """
+        os_open = os.open
+        directio_supported = True
+
+        def os_open_stub(path, flags, *args, **kwargs):
+            # Simulate O_DIRECT availability: raise EINVAL when the
+            # scenario says direct I/O is unsupported, otherwise strip
+            # the flag and delegate to the real os.open.
+            if flags & os.O_DIRECT:
+                if not directio_supported:
+                    raise OSError(errno.EINVAL,
+                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
+                flags &= ~os.O_DIRECT
+            return os_open(path, flags, *args, **kwargs)
+
+        self.stubs.Set(os, 'open', os_open_stub)
+
+        # NOTE(review): @staticmethod on a nested function; the resulting
+        # descriptor is installed as a class attribute below, where it
+        # resolves normally on attribute access.
+        @staticmethod
+        def connection_supports_direct_io_stub(dirpath):
+            return directio_supported
+
+        self.stubs.Set(libvirt_driver.LibvirtDriver,
+            '_supports_direct_io', connection_supports_direct_io_stub)
+
+        instance_ref = objects.Instance(**self.test_instance)
+        network_info = _fake_network_info(self.stubs, 1)
+
+        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        xml = drv._get_guest_xml(self.context, instance_ref,
+                                 network_info, disk_info, image_meta)
+        tree = etree.fromstring(xml)
+        disks = tree.findall('./devices/disk/driver')
+        for guest_disk in disks:
+            self.assertEqual(guest_disk.get("cache"), "none")
+
+        directio_supported = False
+
+        # The O_DIRECT availability is cached on first use in
+        # LibvirtDriver, hence we re-create it here
+        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref)
+        xml = drv._get_guest_xml(self.context, instance_ref,
+                                 network_info, disk_info, image_meta)
+        tree = etree.fromstring(xml)
+        disks = tree.findall('./devices/disk/driver')
+        for guest_disk in disks:
+            self.assertEqual(guest_disk.get("cache"), "writethrough")
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def _check_xml_and_disk_bus(self, image_meta,
+                                block_device_info, wantConfig,
+                                mock_flavor):
+        """Build guest XML and verify each disk's (device type, bus,
+        dev name) triple against *wantConfig*, in order.
+        """
+        instance_ref = objects.Instance(**self.test_instance)
+        network_info = _fake_network_info(self.stubs, 1)
+
+        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref,
+                                            block_device_info,
+                                            image_meta)
+
+        flavor = instance_ref.get_flavor()
+        flavor.extra_specs = {}
+        mock_flavor.return_value = flavor
+
+        xml = drv._get_guest_xml(self.context, instance_ref,
+                                 network_info, disk_info, image_meta,
+                                 block_device_info=block_device_info)
+        tree = etree.fromstring(xml)
+
+        # Disk elements and their <target> children are positionally
+        # matched against wantConfig entries.
+        got_disks = tree.findall('./devices/disk')
+        got_disk_targets = tree.findall('./devices/disk/target')
+        for i in range(len(wantConfig)):
+            want_device_type = wantConfig[i][0]
+            want_device_bus = wantConfig[i][1]
+            want_device_dev = wantConfig[i][2]
+
+            got_device_type = got_disks[i].get('device')
+            got_device_bus = got_disk_targets[i].get('bus')
+            got_device_dev = got_disk_targets[i].get('dev')
+
+            self.assertEqual(got_device_type, want_device_type)
+            self.assertEqual(got_device_bus, want_device_bus)
+            self.assertEqual(got_device_dev, want_device_dev)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_uuid(self, image_meta, mock_flavor):
+ instance_ref = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta)
+ tree = etree.fromstring(xml)
+ self.assertEqual(tree.find('./uuid').text,
+ instance_ref['uuid'])
+
+    @mock.patch.object(objects.Flavor, 'get_by_id')
+    def _check_xml_and_uri(self, instance, mock_flavor,
+                           expect_ramdisk=False, expect_kernel=False,
+                           rescue=None, expect_xen_hvm=False, xen_only=False):
+        """Build guest XML per hypervisor type and verify connection URI
+        plus a large set of per-type XML checks.
+
+        The check list covers kernel/ramdisk presence (with '.rescue'
+        suffix when *rescue* is set), sysinfo entries for qemu/kvm,
+        serial/console devices, disk source paths, and the firewall
+        filterref.  Finally verifies a user-set connection_uri is never
+        overridden.
+        """
+        instance_ref = objects.Instance(**instance)
+
+        xen_vm_mode = vm_mode.XEN
+        if expect_xen_hvm:
+            xen_vm_mode = vm_mode.HVM
+
+        # Per-hypervisor expected URI and base XML checks.
+        type_uri_map = {'qemu': ('qemu:///system',
+                             [(lambda t: t.find('.').get('type'), 'qemu'),
+                              (lambda t: t.find('./os/type').text,
+                               vm_mode.HVM),
+                              (lambda t: t.find('./devices/emulator'), None)]),
+                        'kvm': ('qemu:///system',
+                             [(lambda t: t.find('.').get('type'), 'kvm'),
+                              (lambda t: t.find('./os/type').text,
+                               vm_mode.HVM),
+                              (lambda t: t.find('./devices/emulator'), None)]),
+                        'uml': ('uml:///system',
+                             [(lambda t: t.find('.').get('type'), 'uml'),
+                              (lambda t: t.find('./os/type').text,
+                               vm_mode.UML)]),
+                        'xen': ('xen:///',
+                             [(lambda t: t.find('.').get('type'), 'xen'),
+                              (lambda t: t.find('./os/type').text,
+                               xen_vm_mode)])}
+
+        if expect_xen_hvm or xen_only:
+            hypervisors_to_check = ['xen']
+        else:
+            hypervisors_to_check = ['qemu', 'kvm', 'xen']
+
+        # Extend each hypervisor's check list with kernel/ramdisk,
+        # sysinfo and serial/console expectations.
+        for hypervisor_type in hypervisors_to_check:
+            check_list = type_uri_map[hypervisor_type][1]
+
+            if rescue:
+                suffix = '.rescue'
+            else:
+                suffix = ''
+            if expect_kernel:
+                check = (lambda t: self.relpath(t.find('./os/kernel').text).
+                         split('/')[1], 'kernel' + suffix)
+            else:
+                check = (lambda t: t.find('./os/kernel'), None)
+            check_list.append(check)
+
+            if expect_kernel:
+                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
+                         text, hypervisor_type == "qemu")
+                check_list.append(check)
+            # Hypervisors that only support vm_mode.HVM and Xen
+            # should not produce configuration that results in kernel
+            # arguments
+            if not expect_kernel and (hypervisor_type in
+                                      ['qemu', 'kvm', 'xen']):
+                check = (lambda t: t.find('./os/root'), None)
+                check_list.append(check)
+                check = (lambda t: t.find('./os/cmdline'), None)
+                check_list.append(check)
+
+            if expect_ramdisk:
+                check = (lambda t: self.relpath(t.find('./os/initrd').text).
+                         split('/')[1], 'ramdisk' + suffix)
+            else:
+                check = (lambda t: t.find('./os/initrd'), None)
+            check_list.append(check)
+
+            if hypervisor_type in ['qemu', 'kvm']:
+                # Five sysinfo entries: manufacturer, product, version,
+                # serial, uuid -- each checked by name and text.
+                xpath = "./sysinfo/system/entry"
+                check = (lambda t: t.findall(xpath)[0].get("name"),
+                         "manufacturer")
+                check_list.append(check)
+                check = (lambda t: t.findall(xpath)[0].text,
+                         version.vendor_string())
+                check_list.append(check)
+
+                check = (lambda t: t.findall(xpath)[1].get("name"),
+                         "product")
+                check_list.append(check)
+                check = (lambda t: t.findall(xpath)[1].text,
+                         version.product_string())
+                check_list.append(check)
+
+                check = (lambda t: t.findall(xpath)[2].get("name"),
+                         "version")
+                check_list.append(check)
+                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
+                # converted to None), so we need an `or ''` to correct for that
+                check = (lambda t: t.findall(xpath)[2].text or '',
+                         version.version_string_with_package())
+                check_list.append(check)
+
+                check = (lambda t: t.findall(xpath)[3].get("name"),
+                         "serial")
+                check_list.append(check)
+                check = (lambda t: t.findall(xpath)[3].text,
+                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
+                check_list.append(check)
+
+                check = (lambda t: t.findall(xpath)[4].get("name"),
+                         "uuid")
+                check_list.append(check)
+                check = (lambda t: t.findall(xpath)[4].text,
+                         instance['uuid'])
+                check_list.append(check)
+
+            if hypervisor_type in ['qemu', 'kvm']:
+                check = (lambda t: t.findall('./devices/serial')[0].get(
+                    'type'), 'file')
+                check_list.append(check)
+                check = (lambda t: t.findall('./devices/serial')[1].get(
+                    'type'), 'pty')
+                check_list.append(check)
+                check = (lambda t: self.relpath(t.findall(
+                    './devices/serial/source')[0].get('path')).
+                    split('/')[1], 'console.log')
+                check_list.append(check)
+            else:
+                check = (lambda t: t.find('./devices/console').get(
+                    'type'), 'pty')
+                check_list.append(check)
+
+        # Checks shared by every hypervisor type; disk layout differs
+        # between rescue and normal boot.
+        common_checks = [
+            (lambda t: t.find('.').tag, 'domain'),
+            (lambda t: t.find('./memory').text, '2097152')]
+        if rescue:
+            common_checks += [
+                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
+                                        get('file')).split('/')[1],
+                 'disk.rescue'),
+                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
+                                        get('file')).split('/')[1], 'disk')]
+        else:
+            common_checks += [(lambda t: self.relpath(t.findall(
+                './devices/disk/source')[0].get('file')).split('/')[1],
+                               'disk')]
+            common_checks += [(lambda t: self.relpath(t.findall(
+                './devices/disk/source')[1].get('file')).split('/')[1],
+                               'disk.local')]
+
+        for virt_type in hypervisors_to_check:
+            expected_uri = type_uri_map[virt_type][0]
+            checks = type_uri_map[virt_type][1]
+            self.flags(virt_type=virt_type, group='libvirt')
+
+            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
+                # Simulate an older libvirt without the baseline-CPU
+                # expand-features capability.
+                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+
+                conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+                self.assertEqual(conn.uri(), expected_uri)
+
+                network_info = _fake_network_info(self.stubs, 1)
+                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                                    instance_ref,
+                                                    rescue=rescue)
+
+                flavor = instance_ref.get_flavor()
+                flavor.extra_specs = {}
+                mock_flavor.return_value = flavor
+                xml = conn._get_guest_xml(self.context, instance_ref,
+                                          network_info, disk_info,
+                                          rescue=rescue)
+                tree = etree.fromstring(xml)
+                for i, (check, expected_result) in enumerate(checks):
+                    self.assertEqual(check(tree),
+                                     expected_result,
+                                     '%s != %s failed check %d' %
+                                     (check(tree), expected_result, i))
+
+                for i, (check, expected_result) in enumerate(common_checks):
+                    self.assertEqual(check(tree),
+                                     expected_result,
+                                     '%s != %s failed common check %d' %
+                                     (check(tree), expected_result, i))
+
+                filterref = './devices/interface/filterref'
+                vif = network_info[0]
+                nic_id = vif['address'].replace(':', '')
+                fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
+                instance_filter_name = fw._instance_filter_name(instance_ref,
+                                                                nic_id)
+                self.assertEqual(tree.find(filterref).get('filter'),
+                                 instance_filter_name)
+
+        # This test is supposed to make sure we don't
+        # override a specifically set uri
+        #
+        # Deliberately not just assigning this string to CONF.connection_uri
+        # and checking against that later on. This way we make sure the
+        # implementation doesn't fiddle around with the CONF.
+        testuri = 'something completely different'
+        self.flags(connection_uri=testuri, group='libvirt')
+        for (virt_type, (expected_uri, checks)) in type_uri_map.iteritems():
+            self.flags(virt_type=virt_type, group='libvirt')
+            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+            self.assertEqual(conn.uri(), testuri)
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ # ensure_filtering_fules_for_instance() finishes with timeout.
+ # Preparing mocks
+ def fake_none(self, *args):
+ return
+
+ def fake_raise(self):
+ raise libvirt.libvirtError('ERR')
+
+ class FakeTime(object):
+ def __init__(self):
+ self.counter = 0
+
+ def sleep(self, t):
+ self.counter += t
+
+ fake_timer = FakeTime()
+
+ def fake_sleep(t):
+ fake_timer.sleep(t)
+
+ # _fake_network_info must be called before create_fake_libvirt_mock(),
+ # as _fake_network_info calls importutils.import_class() and
+ # create_fake_libvirt_mock() mocks importutils.import_class().
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock()
+ instance_ref = objects.Instance(**self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ try:
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.firewall_driver,
+ 'setup_basic_filtering',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'prepare_instance_filter',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'instance_filter_exists',
+ fake_none)
+ self.stubs.Set(greenthread,
+ 'sleep',
+ fake_sleep)
+ conn.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
+ except exception.NovaException as e:
+ msg = ('The firewall filter for %s does not exist' %
+ instance_ref['name'])
+ c1 = (0 <= six.text_type(e).find(msg))
+ self.assertTrue(c1)
+
+ self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
+ "amount of time")
+
+    def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
+        """Destination check passes for block migration: CPUs compare OK,
+        the shared-storage test file is created, and disk_available_least
+        (GB) is converted to MB in the returned dict.
+        """
+        instance_ref = objects.Instance(**self.test_instance)
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        compute_info = {'disk_available_least': 400,
+                        'cpu_info': 'asdf',
+                        }
+        filename = "file"
+
+        self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
+        self.mox.StubOutWithMock(conn, '_compare_cpu')
+
+        # _check_cpu_match
+        conn._compare_cpu("asdf")
+
+        # mounted_on_same_shared_storage
+        conn._create_shared_storage_test_file().AndReturn(filename)
+
+        self.mox.ReplayAll()
+        return_value = conn.check_can_live_migrate_destination(self.context,
+                instance_ref, compute_info, compute_info, True)
+        # 400 GB of disk_available_least == 409600 MB
+        self.assertThat({"filename": "file",
+                         'image_type': 'default',
+                         'disk_available_mb': 409600,
+                         "disk_over_commit": False,
+                         "block_migration": True},
+                        matchers.DictMatches(return_value))
+
+    def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
+        """Destination check passes without block migration; no disk space
+        figure is needed, so disk_available_mb is reported as None.
+        """
+        instance_ref = objects.Instance(**self.test_instance)
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        compute_info = {'cpu_info': 'asdf'}
+        filename = "file"
+
+        self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
+        self.mox.StubOutWithMock(conn, '_compare_cpu')
+
+        # _check_cpu_match
+        conn._compare_cpu("asdf")
+
+        # mounted_on_same_shared_storage
+        conn._create_shared_storage_test_file().AndReturn(filename)
+
+        self.mox.ReplayAll()
+        return_value = conn.check_can_live_migrate_destination(self.context,
+                instance_ref, compute_info, compute_info, False)
+        self.assertThat({"filename": "file",
+                         "image_type": 'default',
+                         "block_migration": False,
+                         "disk_over_commit": False,
+                         "disk_available_mb": None},
+                        matchers.DictMatches(return_value))
+
+    def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
+        """An InvalidCPUInfo raised by the CPU comparison propagates out
+        of check_can_live_migrate_destination unchanged.
+        """
+        instance_ref = objects.Instance(**self.test_instance)
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        compute_info = {'cpu_info': 'asdf'}
+
+        self.mox.StubOutWithMock(conn, '_compare_cpu')
+
+        conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
+                                           reason='foo')
+                                           )
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidCPUInfo,
+                          conn.check_can_live_migrate_destination,
+                          self.context, instance_ref,
+                          compute_info, compute_info, False)
+
+    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
+        """Destination cleanup removes the shared-storage test file named
+        in dest_check_data.
+        """
+        objects.Instance(**self.test_instance)
+        dest_check_data = {"filename": "file",
+                           "block_migration": True,
+                           "disk_over_commit": False,
+                           "disk_available_mb": 1024}
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
+        conn._cleanup_shared_storage_test_file("file")
+
+        self.mox.ReplayAll()
+        conn.check_can_live_migrate_destination_cleanup(self.context,
+                                                        dest_check_data)
+
+    def _mock_can_live_migrate_source(self, block_migration=False,
+                                      is_shared_block_storage=False,
+                                      is_shared_instance_path=False,
+                                      disk_available_mb=1024):
+        """Prepare common fixtures for check_can_live_migrate_source tests.
+
+        Stubs the shared-block-storage and shared-instance-path probes to
+        return the given booleans and returns the
+        (instance, dest_check_data, conn) triple the tests drive.
+        Callers must still invoke self.mox.ReplayAll() themselves.
+        """
+        instance = objects.Instance(**self.test_instance)
+        dest_check_data = {'filename': 'file',
+                           'image_type': 'default',
+                           'block_migration': block_migration,
+                           'disk_over_commit': False,
+                           'disk_available_mb': disk_available_mb}
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        self.mox.StubOutWithMock(conn, '_is_shared_block_storage')
+        conn._is_shared_block_storage(instance, dest_check_data).AndReturn(
+                is_shared_block_storage)
+        self.mox.StubOutWithMock(conn, '_check_shared_storage_test_file')
+        conn._check_shared_storage_test_file('file').AndReturn(
+                is_shared_instance_path)
+
+        return (instance, dest_check_data, conn)
+
+    def test_check_can_live_migrate_source_block_migration(self):
+        """Block migration with no shared storage passes the source check
+        after the destination disk-space assertion, and the returned dict
+        carries the shared-storage flags.
+        """
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+            block_migration=True)
+
+        self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
+        conn._assert_dest_node_has_enough_disk(
+            self.context, instance, dest_check_data['disk_available_mb'],
+            False, None)
+
+        self.mox.ReplayAll()
+        ret = conn.check_can_live_migrate_source(self.context, instance,
+                                                 dest_check_data)
+        self.assertIsInstance(ret, dict)
+        self.assertIn('is_shared_block_storage', ret)
+        self.assertIn('is_shared_instance_path', ret)
+
+    def test_check_can_live_migrate_source_shared_block_storage(self):
+        """Shared block storage alone satisfies a non-block migration."""
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+            is_shared_block_storage=True)
+        self.mox.ReplayAll()
+        conn.check_can_live_migrate_source(self.context, instance,
+                                           dest_check_data)
+
+    def test_check_can_live_migrate_source_shared_instance_path(self):
+        """A shared instance path alone satisfies a non-block migration."""
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+            is_shared_instance_path=True)
+        self.mox.ReplayAll()
+        conn.check_can_live_migrate_source(self.context, instance,
+                                           dest_check_data)
+
+    def test_check_can_live_migrate_source_non_shared_fails(self):
+        """Non-block migration with no shared storage at all is invalid."""
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source()
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidSharedStorage,
+                          conn.check_can_live_migrate_source, self.context,
+                          instance, dest_check_data)
+
+    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
+        """Requesting block migration over shared block storage is
+        rejected with InvalidLocalStorage.
+        """
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+            block_migration=True,
+            is_shared_block_storage=True)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidLocalStorage,
+                          conn.check_can_live_migrate_source,
+                          self.context, instance, dest_check_data)
+
+    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
+        """Requesting block migration over a shared instance path is
+        rejected with InvalidLocalStorage.
+        """
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+            block_migration=True,
+            is_shared_instance_path=True)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidLocalStorage,
+                          conn.check_can_live_migrate_source,
+                          self.context, instance, dest_check_data)
+
+    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
+        """No shared storage and no block migration cannot work."""
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source()
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidSharedStorage,
+                          conn.check_can_live_migrate_source,
+                          self.context, instance, dest_check_data)
+
+    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
+        """Block migration fails with MigrationError when the destination
+        reports less disk (0 MB) than the instance needs.
+        """
+        instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+            block_migration=True,
+            disk_available_mb=0)
+
+        self.mox.StubOutWithMock(conn, "get_instance_disk_info")
+        conn.get_instance_disk_info(instance["name"],
+                                    block_device_info=None).AndReturn(
+                                        '[{"virt_disk_size":2}]')
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.MigrationError,
+                          conn.check_can_live_migrate_source,
+                          self.context, instance, dest_check_data)
+
+    def test_is_shared_block_storage_rbd(self):
+        """RBD on both source (images_type) and destination (image_type)
+        counts as shared block storage.
+        """
+        CONF.set_override('images_type', 'rbd', 'libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertTrue(conn._is_shared_block_storage(
+                            'instance', {'image_type': 'rbd'}))
+
+    def test_is_shared_block_storage_non_remote(self):
+        """No remote image backend and no shared path is not shared."""
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertFalse(conn._is_shared_block_storage(
+                             'instance', {'is_shared_instance_path': False}))
+
+    def test_is_shared_block_storage_rbd_only_source(self):
+        """RBD on the source only (destination dict lacks image_type)
+        is not shared block storage.
+        """
+        CONF.set_override('images_type', 'rbd', 'libvirt')
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertFalse(conn._is_shared_block_storage(
+                             'instance', {'is_shared_instance_path': False}))
+
+    def test_is_shared_block_storage_rbd_only_dest(self):
+        """RBD on the destination only (source images_type unset) is not
+        shared block storage.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertFalse(conn._is_shared_block_storage(
+                             'instance', {'image_type': 'rbd',
+                             'is_shared_instance_path': False}))
+
+    def test_is_shared_block_storage_volume_backed(self):
+        """A fully volume-backed instance (no local disks reported) is
+        treated as shared block storage.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        with mock.patch.object(conn, 'get_instance_disk_info') as mock_get:
+            mock_get.return_value = '[]'
+            self.assertTrue(conn._is_shared_block_storage(
+                {'name': 'name'}, {'is_volume_backed': True,
+                                   'is_shared_instance_path': False}))
+
+    def test_is_shared_block_storage_volume_backed_with_disk(self):
+        """A volume-backed instance that still has a local disk is not
+        shared block storage.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        with mock.patch.object(conn, 'get_instance_disk_info') as mock_get:
+            mock_get.return_value = '[{"virt_disk_size":2}]'
+            self.assertFalse(conn._is_shared_block_storage(
+                {'name': 'instance_name'},
+                {'is_volume_backed': True, 'is_shared_instance_path': False}))
+            mock_get.assert_called_once_with('instance_name')
+
+    def test_is_shared_block_storage_nfs(self):
+        """Image files living inside a shared instance path (e.g. NFS)
+        count as shared block storage.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        mock_image_backend = mock.MagicMock()
+        conn.image_backend = mock_image_backend
+        mock_backend = mock.MagicMock()
+        mock_image_backend.backend.return_value = mock_backend
+        mock_backend.is_file_in_instance_path.return_value = True
+        self.assertTrue(conn._is_shared_block_storage(
+            'instance', {'is_shared_instance_path': True}))
+
+    @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
+    def test_live_migration_changes_listen_addresses(self):
+        """With the MIGRATABLE flag available, _live_migration rewrites the
+        vnc/spice listen addresses in the domain XML to the destination's
+        addresses before calling migrateToURI2; the mock expects exactly
+        that rewritten XML and then fails the migration so the rollback
+        path is exercised too.
+        """
+        self.compute = importutils.import_object(CONF.compute_manager)
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance_ref = objects.Instance(**instance_dict)
+
+        xml_tmpl = ("<domain type='kvm'>"
+                    "<devices>"
+                    "<graphics type='vnc' listen='{vnc}'>"
+                    "<listen address='{vnc}'/>"
+                    "</graphics>"
+                    "<graphics type='spice' listen='{spice}'>"
+                    "<listen address='{spice}'/>"
+                    "</graphics>"
+                    "</devices>"
+                    "</domain>")
+
+        initial_xml = xml_tmpl.format(vnc='1.2.3.4',
+                                      spice='5.6.7.8')
+
+        # Round-trip through etree so attribute ordering matches what the
+        # driver will produce when it serializes the updated XML.
+        target_xml = xml_tmpl.format(vnc='10.0.0.1',
+                                     spice='10.0.0.2')
+        target_xml = etree.tostring(etree.fromstring(target_xml))
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
+        _bandwidth = CONF.libvirt.live_migration_bandwidth
+        vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
+                initial_xml)
+        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
+                             None,
+                             target_xml,
+                             mox.IgnoreArg(),
+                             None,
+                             _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref['name']:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+        self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+        self.compute._rollback_live_migration(self.context, instance_ref,
+                                              'dest', False)
+
+        # start test
+        migrate_data = {'pre_live_migration_result':
+                {'graphics_listen_addrs':
+                    {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          self.context, instance_ref, 'dest', False,
+                          self.compute._rollback_live_migration,
+                          migrate_data=migrate_data)
+
+    @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
+    def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
+        """Without VIR_DOMAIN_XML_MIGRATABLE the driver must fall back to
+        the legacy migrateToURI call (no XML rewriting); listen addresses
+        of 0.0.0.0 make that fallback safe.
+        """
+        self.compute = importutils.import_object(CONF.compute_manager)
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance_ref = objects.Instance(**instance_dict)
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI")
+        _bandwidth = CONF.libvirt.live_migration_bandwidth
+        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+                            mox.IgnoreArg(),
+                            None,
+                            _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref['name']:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+        self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+        self.compute._rollback_live_migration(self.context, instance_ref,
+                                              'dest', False)
+
+        # start test
+        migrate_data = {'pre_live_migration_result':
+                {'graphics_listen_addrs':
+                    {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          self.context, instance_ref, 'dest', False,
+                          self.compute._rollback_live_migration,
+                          migrate_data=migrate_data)
+
+    def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
+        """With no graphics listen addresses in migrate_data the driver
+        uses legacy migrateToURI; the mocked failure verifies the rollback
+        callback is invoked.
+        """
+        self.compute = importutils.import_object(CONF.compute_manager)
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance_ref = objects.Instance(**instance_dict)
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI")
+        _bandwidth = CONF.libvirt.live_migration_bandwidth
+        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+                            mox.IgnoreArg(),
+                            None,
+                            _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref['name']:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+        self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+        self.compute._rollback_live_migration(self.context, instance_ref,
+                                              'dest', False)
+
+        # start test
+        migrate_data = {}
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          self.context, instance_ref, 'dest', False,
+                          self.compute._rollback_live_migration,
+                          migrate_data=migrate_data)
+
+    @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
+    def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
+        """Without the MIGRATABLE flag the XML cannot be rewritten, so a
+        specific (non-wildcard) listen address must abort the migration
+        with MigrationError; migrateToURI is never called.
+        """
+        self.flags(vnc_enabled=True, vncserver_listen='1.2.3.4')
+        self.compute = importutils.import_object(CONF.compute_manager)
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance_ref = objects.Instance(**instance_dict)
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI")
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref['name']:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+        self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+        self.compute._rollback_live_migration(self.context, instance_ref,
+                                              'dest', False)
+
+        # start test
+        migrate_data = {'pre_live_migration_result':
+                {'graphics_listen_addrs':
+                    {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(exception.MigrationError,
+                          conn._live_migration,
+                          self.context, instance_ref, 'dest', False,
+                          self.compute._rollback_live_migration,
+                          migrate_data=migrate_data)
+
+    def test_live_migration_raises_exception(self):
+        # Confirms recover method is called when exceptions are raised.
+        # The instance must also be left in its pre-migration
+        # ACTIVE/RUNNING state after the failure.
+        # Preparing data
+        self.compute = importutils.import_object(CONF.compute_manager)
+        instance_dict = dict(self.test_instance)
+        instance_dict.update({'host': 'fake',
+                              'power_state': power_state.RUNNING,
+                              'vm_state': vm_states.ACTIVE})
+        instance_ref = objects.Instance(**instance_dict)
+
+        # Preparing mocks.  Which migrate API gets called depends on
+        # whether this libvirt exposes VIR_DOMAIN_XML_MIGRATABLE.
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
+        _bandwidth = CONF.libvirt.live_migration_bandwidth
+        if getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
+            vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+                                mox.IgnoreArg(),
+                                None,
+                                _bandwidth).AndRaise(
+                                        libvirt.libvirtError('ERR'))
+        else:
+            vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
+                    FakeVirtDomain().XMLDesc(0))
+            vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
+                                 None,
+                                 mox.IgnoreArg(),
+                                 mox.IgnoreArg(),
+                                 None,
+                                 _bandwidth).AndRaise(
+                                         libvirt.libvirtError('ERR'))
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref['name']:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+        self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+        self.compute._rollback_live_migration(self.context, instance_ref,
+                                              'dest', False)
+
+        # start test
+        migrate_data = {'pre_live_migration_result':
+                {'graphics_listen_addrs':
+                    {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
+        self.mox.ReplayAll()
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          self.context, instance_ref, 'dest', False,
+                          self.compute._rollback_live_migration,
+                          migrate_data=migrate_data)
+
+        # The failure must not have disturbed the instance's state.
+        self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
+        self.assertEqual(power_state.RUNNING, instance_ref.power_state)
+
+    @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
+    def test_live_migration_raises_unsupported_config_exception(self):
+        # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
+        # migrateToURI is used instead.
+
+        # Preparing data
+        instance_ref = fake_instance.fake_instance_obj(
+            self.context, **self.test_instance)
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
+        self.mox.StubOutWithMock(vdmock, 'migrateToURI')
+        _bandwidth = CONF.libvirt.live_migration_bandwidth
+        vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
+            FakeVirtDomain().XMLDesc(0))
+        unsupported_config_error = libvirt.libvirtError('ERR')
+        unsupported_config_error.err = (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
+        # This is the first error we hit but since the error code is
+        # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
+        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
+                             mox.IgnoreArg(), mox.IgnoreArg(), None,
+                             _bandwidth).AndRaise(unsupported_config_error)
+        # This is the second and final error that will actually kill the run,
+        # we use TestingException to make sure it's not the same libvirtError
+        # above.
+        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+                            mox.IgnoreArg(), None,
+                            _bandwidth).AndRaise(test.TestingException('oops'))
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref.name:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+        def fake_recover_method(context, instance, dest, block_migration):
+            # No-op recover callback; this test only cares about which
+            # migrate API is attempted and the final exception type.
+            pass
+
+        graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
+        migrate_data = {'pre_live_migration_result':
+                {'graphics_listen_addrs': graphics_listen_addrs}}
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        # The address check must be consulted before falling back.
+        self.mox.StubOutWithMock(
+            conn, '_check_graphics_addresses_can_live_migrate')
+        conn._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
+        self.mox.ReplayAll()
+
+        # start test
+        self.assertRaises(test.TestingException, conn._live_migration,
+                          self.context, instance_ref, 'dest', post_method=None,
+                          recover_method=fake_recover_method,
+                          migrate_data=migrate_data)
+
+    def test_rollback_live_migration_at_destination(self):
+        """Rolling back at the destination simply destroys the instance,
+        passing all arguments through to destroy() unchanged.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        with mock.patch.object(conn, "destroy") as mock_destroy:
+            conn.rollback_live_migration_at_destination("context",
+                    "instance", [], None, True, None)
+            mock_destroy.assert_called_once_with("context",
+                    "instance", [], None, True, None)
+
+    def _do_test_create_images_and_backing(self, disk_type):
+        """Shared body: for a disk of *disk_type* with no backing file,
+        _create_images_and_backing creates an image of the virtual size
+        and fetches the kernel/ramdisk.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
+        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')
+
+        disk_info = {'path': 'foo', 'type': disk_type,
+                     'disk_size': 1 * 1024 ** 3,
+                     'virt_disk_size': 20 * 1024 ** 3,
+                     'backing_file': None}
+        disk_info_json = jsonutils.dumps([disk_info])
+
+        libvirt_driver.libvirt_utils.create_image(
+            disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
+        conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
+        self.mox.ReplayAll()
+
+        # Pretend no image exists yet so create_image must be called.
+        self.stubs.Set(os.path, 'exists', lambda *args: False)
+        conn._create_images_and_backing(self.context, self.test_instance,
+                                        "/fake/instance/dir", disk_info_json)
+
+    def test_create_images_and_backing_qcow2(self):
+        """qcow2 disks are created via create_image."""
+        self._do_test_create_images_and_backing('qcow2')
+
+    def test_create_images_and_backing_raw(self):
+        """raw disks are created via create_image."""
+        self._do_test_create_images_and_backing('raw')
+
+    def test_create_images_and_backing_ephemeral_gets_created(self):
+        """Disks with a backing file get that file fetched into the image
+        cache, while ephemeral backing files are (re)created locally via
+        _create_ephemeral — one call each, targeted at the cache dir.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        disk_info_json = jsonutils.dumps(
+            [{u'backing_file': u'fake_image_backing_file',
+              u'disk_size': 10747904,
+              u'path': u'disk_path',
+              u'type': u'qcow2',
+              u'virt_disk_size': 25165824},
+             {u'backing_file': u'ephemeral_1_default',
+              u'disk_size': 393216,
+              u'over_committed_disk_size': 1073348608,
+              u'path': u'disk_eph_path',
+              u'type': u'qcow2',
+              u'virt_disk_size': 1073741824}])
+
+        base_dir = os.path.join(CONF.instances_path,
+                                CONF.image_cache_subdirectory_name)
+        self.test_instance.update({'name': 'fake_instance',
+                                   'user_id': 'fake-user',
+                                   'os_type': None,
+                                   'project_id': 'fake-project'})
+
+        with contextlib.nested(
+            mock.patch.object(conn, '_fetch_instance_kernel_ramdisk'),
+            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
+            mock.patch.object(conn, '_create_ephemeral')
+        ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
+                create_ephemeral_mock):
+            conn._create_images_and_backing(self.context, self.test_instance,
+                                            "/fake/instance/dir",
+                                            disk_info_json)
+            self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
+            m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
+            self.assertEqual(
+                    os.path.join(base_dir, 'ephemeral_1_default'),
+                    m_kwargs['target'])
+            self.assertEqual(len(fetch_image_mock.call_args_list), 1)
+            m_args, m_kwargs = fetch_image_mock.call_args_list[0]
+            self.assertEqual(
+                    os.path.join(base_dir, 'fake_image_backing_file'),
+                    m_kwargs['target'])
+
+    def test_create_images_and_backing_disk_info_none(self):
+        """With disk_info None, only kernel/ramdisk fetching happens; no
+        disk creation is attempted.
+        """
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
+
+        conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
+        self.mox.ReplayAll()
+
+        conn._create_images_and_backing(self.context, self.test_instance,
+                                        "/fake/instance/dir", None)
+
+    def test_pre_live_migration_works_correctly_mocked(self):
+        """pre_live_migration connects every mapped volume, plugs VIFs,
+        and returns the local graphics listen addresses.
+        """
+        # Creating testdata
+        vol = {'block_device_mapping': [
+                  {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        class FakeNetworkInfo():
+            def fixed_ips(self):
+                return ["test_ip_addr"]
+
+        def fake_none(*args, **kwargs):
+            return
+
+        self.stubs.Set(conn, '_create_images_and_backing', fake_none)
+
+        inst_ref = {'id': 'foo'}
+        c = context.get_admin_context()
+        nw_info = FakeNetworkInfo()
+
+        # Creating mocks: one _connect_volume per block device, with the
+        # device name derived from the mount point.
+        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
+        driver.block_device_info_get_mapping(vol
+                                             ).AndReturn(vol['block_device_mapping'])
+        self.mox.StubOutWithMock(conn, "_connect_volume")
+        for v in vol['block_device_mapping']:
+            disk_info = {
+                'bus': "scsi",
+                'dev': v['mount_device'].rpartition("/")[2],
+                'type': "disk"
+                }
+            conn._connect_volume(v['connection_info'],
+                                 disk_info)
+        self.mox.StubOutWithMock(conn, 'plug_vifs')
+        conn.plug_vifs(mox.IsA(inst_ref), nw_info)
+
+        self.mox.ReplayAll()
+        result = conn.pre_live_migration(c, inst_ref, vol, nw_info, None)
+
+        target_res = {'graphics_listen_addrs': {'spice': '127.0.0.1',
+                                                'vnc': '127.0.0.1'}}
+        self.assertEqual(result, target_res)
+
+    def test_pre_live_migration_block_with_config_drive_mocked(self):
+        """Block migration of an instance that requires a config drive is
+        refused with NoLiveMigrationForConfigDriveInLibVirt.
+        """
+        # Creating testdata
+        vol = {'block_device_mapping': [
+                  {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        def fake_true(*args, **kwargs):
+            return True
+
+        # Force "config drive required" regardless of the instance.
+        self.stubs.Set(configdrive, 'required_by', fake_true)
+
+        inst_ref = {'id': 'foo'}
+        c = context.get_admin_context()
+
+        self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
+                          conn.pre_live_migration, c, inst_ref, vol, None,
+                          None, {'is_shared_instance_path': False,
+                                 'is_shared_block_storage': False})
+
+    def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
+        """Volume-backed migration without a shared instance path creates
+        the instance directory under instances_path, connects volumes,
+        plugs VIFs, and returns the graphics listen addresses.
+        """
+        # Creating testdata, using temp dir.
+        with utils.tempdir() as tmpdir:
+            self.flags(instances_path=tmpdir)
+            vol = {'block_device_mapping': [
+                  {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+            def fake_none(*args, **kwargs):
+                return
+
+            self.stubs.Set(conn, '_create_images_and_backing', fake_none)
+
+            class FakeNetworkInfo():
+                def fixed_ips(self):
+                    return ["test_ip_addr"]
+            inst_ref = objects.Instance(**self.test_instance)
+            c = context.get_admin_context()
+            nw_info = FakeNetworkInfo()
+            # Creating mocks
+            self.mox.StubOutWithMock(conn, "_connect_volume")
+            for v in vol['block_device_mapping']:
+                disk_info = {
+                    'bus': "scsi",
+                    'dev': v['mount_device'].rpartition("/")[2],
+                    'type': "disk"
+                    }
+                conn._connect_volume(v['connection_info'],
+                                     disk_info)
+            self.mox.StubOutWithMock(conn, 'plug_vifs')
+            conn.plug_vifs(mox.IsA(inst_ref), nw_info)
+            self.mox.ReplayAll()
+            migrate_data = {'is_shared_instance_path': False,
+                            'is_volume_backed': True,
+                            'block_migration': False,
+                            'instance_relative_path': inst_ref['name']
+                            }
+            ret = conn.pre_live_migration(c, inst_ref, vol, nw_info, None,
+                                          migrate_data)
+            target_ret = {'graphics_listen_addrs': {'spice': '127.0.0.1',
+                                                    'vnc': '127.0.0.1'}}
+            self.assertEqual(ret, target_ret)
+            # The per-instance directory must have been created on disk.
+            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
+                                                       inst_ref['name'])))
+
+    def test_pre_live_migration_plug_vifs_retry_fails(self):
+        """plug_vifs failing on every attempt exhausts the retry budget
+        and the last ProcessExecutionError propagates.
+        """
+        self.flags(live_migration_retry_count=3)
+        instance = {'name': 'test', 'uuid': 'uuid'}
+
+        def fake_plug_vifs(instance, network_info):
+            raise processutils.ProcessExecutionError()
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
+        # Skip the real inter-retry sleeps to keep the test fast.
+        self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
+        self.assertRaises(processutils.ProcessExecutionError,
+                          conn.pre_live_migration,
+                          self.context, instance, block_device_info=None,
+                          network_info=[], disk_info={})
+
+    def test_pre_live_migration_plug_vifs_retry_works(self):
+        """plug_vifs succeeding on the final allowed attempt lets
+        pre_live_migration complete without raising.
+        """
+        self.flags(live_migration_retry_count=3)
+        called = {'count': 0}
+        instance = {'name': 'test', 'uuid': 'uuid'}
+
+        def fake_plug_vifs(instance, network_info):
+            called['count'] += 1
+            if called['count'] < CONF.live_migration_retry_count:
+                raise processutils.ProcessExecutionError()
+            else:
+                return
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
+        # Skip the real inter-retry sleeps to keep the test fast.
+        self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
+        conn.pre_live_migration(self.context, instance, block_device_info=None,
+                                network_info=[], disk_info={})
+
+    def test_pre_live_migration_image_not_created_with_shared_storage(self):
+        """When storage is shared (or the instance path is, without block
+        migration) no backing images are created on the destination.
+        """
+        migrate_data_set = [{'is_shared_block_storage': False,
+                             'block_migration': False},
+                            {'is_shared_block_storage': True,
+                             'block_migration': False},
+                            {'is_shared_block_storage': False,
+                             'block_migration': True}]
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        instance = objects.Instance(**self.test_instance)
+        # creating mocks
+        with contextlib.nested(
+            mock.patch.object(conn,
+                              '_create_images_and_backing'),
+            mock.patch.object(conn,
+                              'ensure_filtering_rules_for_instance'),
+            mock.patch.object(conn, 'plug_vifs'),
+        ) as (
+            create_image_mock,
+            rules_mock,
+            plug_mock,
+        ):
+            for migrate_data in migrate_data_set:
+                res = conn.pre_live_migration(self.context, instance,
+                                              block_device_info=None,
+                                              network_info=[], disk_info={},
+                                              migrate_data=migrate_data)
+                self.assertFalse(create_image_mock.called)
+                self.assertIsInstance(res, dict)
+
+    def test_pre_live_migration_with_not_shared_instance_path(self):
+        """Without any shared storage the destination must create backing
+        images, and it must be given a non-empty instance directory.
+        """
+        migrate_data = {'is_shared_block_storage': False,
+                        'is_shared_instance_path': False}
+
+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        instance = objects.Instance(**self.test_instance)
+
+        def check_instance_dir(context, instance,
+                               instance_dir, disk_info):
+            # Side effect asserting the directory argument is truthy.
+            self.assertTrue(instance_dir)
+        # creating mocks
+        with contextlib.nested(
+            mock.patch.object(conn,
+                              '_create_images_and_backing',
+                              side_effect=check_instance_dir),
+            mock.patch.object(conn,
+                              'ensure_filtering_rules_for_instance'),
+            mock.patch.object(conn, 'plug_vifs'),
+        ) as (
+            create_image_mock,
+            rules_mock,
+            plug_mock,
+        ):
+            res = conn.pre_live_migration(self.context, instance,
+                                          block_device_info=None,
+                                          network_info=[], disk_info={},
+                                          migrate_data=migrate_data)
+            self.assertTrue(create_image_mock.called)
+            self.assertIsInstance(res, dict)
+
def test_get_instance_disk_info_works_correctly(self):
    """get_instance_disk_info reports size/backing data for both disks."""
    # Test data
    instance_ref = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
    fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
    fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))
    os.path.getsize('/test/disk.local').AndReturn((3328599655))

    # Canned qemu-img output for the qcow2 disk.
    ret = ("image: /test/disk\n"
           "file format: raw\n"
           "virtual size: 20G (21474836480 bytes)\n"
           "disk size: 3.1G\n"
           "cluster_size: 2097152\n"
           "backing file: /test/dummy (actual path: /backing/file)\n")

    self.mox.StubOutWithMock(os.path, "exists")
    os.path.exists('/test/disk.local').AndReturn(True)

    self.mox.StubOutWithMock(utils, "execute")
    utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                  '/test/disk.local').AndReturn((ret, ''))

    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = conn.get_instance_disk_info(instance_ref['name'])
    info = jsonutils.loads(info)
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
    self.assertEqual(info[1]['type'], 'qcow2')
    self.assertEqual(info[1]['path'], '/test/disk.local')
    self.assertEqual(info[1]['virt_disk_size'], 21474836480)
    self.assertEqual(info[1]['backing_file'], "file")
    # 21474836480 (virtual) - 3328599655 (on disk) = 18146236825
    self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
def test_post_live_migration(self):
    """post_live_migration disconnects every mapped volume."""
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    inst_ref = {'id': 'foo'}
    cntx = context.get_admin_context()

    # Set up the mock expectations
    with contextlib.nested(
        mock.patch.object(driver, 'block_device_info_get_mapping',
                          return_value=vol['block_device_mapping']),
        mock.patch.object(conn, '_disconnect_volume')
    ) as (block_device_info_get_mapping, _disconnect_volume):
        conn.post_live_migration(cntx, inst_ref, vol)

        block_device_info_get_mapping.assert_has_calls([
            mock.call(vol)])
        # Each volume is disconnected by device basename (e.g. 'sda').
        _disconnect_volume.assert_has_calls([
            mock.call(v['connection_info'],
                      v['mount_device'].rpartition("/")[2])
            for v in vol['block_device_mapping']])
def test_get_instance_disk_info_excludes_volumes(self):
    """Disks backed by mapped volumes (vdc/vdd) are excluded from the
    disk info report; only local disks are listed.
    """
    # Test data
    instance_ref = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/fake/path/to/volume1'/>"
                "<target dev='vdc' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/fake/path/to/volume2'/>"
                "<target dev='vdd' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
    fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
    fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))
    os.path.getsize('/test/disk.local').AndReturn((3328599655))

    # Canned qemu-img output for the qcow2 disk.
    ret = ("image: /test/disk\n"
           "file format: raw\n"
           "virtual size: 20G (21474836480 bytes)\n"
           "disk size: 3.1G\n"
           "cluster_size: 2097152\n"
           "backing file: /test/dummy (actual path: /backing/file)\n")

    self.mox.StubOutWithMock(os.path, "exists")
    os.path.exists('/test/disk.local').AndReturn(True)

    self.mox.StubOutWithMock(utils, "execute")
    utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                  '/test/disk.local').AndReturn((ret, ''))

    self.mox.ReplayAll()
    conn_info = {'driver_volume_type': 'fake'}
    # vdc/vdd are attached volumes and must not appear in the result.
    info = {'block_device_mapping': [
        {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
        {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = conn.get_instance_disk_info(instance_ref['name'],
                                       block_device_info=info)
    info = jsonutils.loads(info)
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
    self.assertEqual(info[1]['type'], 'qcow2')
    self.assertEqual(info[1]['path'], '/test/disk.local')
    self.assertEqual(info[1]['virt_disk_size'], 21474836480)
    self.assertEqual(info[1]['backing_file'], "file")
    self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
@mock.patch.object(objects.Flavor, 'get_by_id')
def test_spawn_with_network_info(self, mock_flavor):
    """spawn succeeds with network_info supplied, exercising the
    firewall-driver setup path on an old libvirt without
    VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES.
    """
    # Preparing mocks
    def fake_none(*args, **kwargs):
        return

    def fake_getLibVersion():
        return 9011

    def fake_getCapabilities():
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                    <arch>x86_64</arch>
                    <model>Penryn</model>
                    <vendor>Intel</vendor>
                    <topology sockets='1' cores='2' threads='1'/>
                    <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Penryn</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='xtpr'/>
                  </cpu>
               """

    # _fake_network_info must be called before create_fake_libvirt_mock(),
    # as _fake_network_info calls importutils.import_class() and
    # create_fake_libvirt_mock() mocks importutils.import_class().
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  baselineCPU=fake_baselineCPU)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)
    flavor = instance.get_flavor()
    flavor.extra_specs = {}

    mock_flavor.return_value = flavor

    # Mock out the get_info method of the LibvirtDriver so that the polling
    # in the spawn method of the LibvirtDriver returns immediately
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
    libvirt_driver.LibvirtDriver.get_info(instance
                                          ).AndReturn({'state': power_state.RUNNING})

    # Start test
    self.mox.ReplayAll()

    with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
        # Simulate a libvirt predating the baseline-CPU expand flag.
        del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(conn.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(imagebackend.Image,
                       'cache',
                       fake_none)

        conn.spawn(self.context, instance, None, [], 'herp',
                   network_info=network_info)

    # Clean up any instance/cache directories spawn created on disk.
    path = os.path.join(CONF.instances_path, instance['name'])
    if os.path.isdir(path):
        shutil.rmtree(path)

    path = os.path.join(CONF.instances_path,
                        CONF.image_cache_subdirectory_name)
    if os.path.isdir(path):
        shutil.rmtree(os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name))
def test_spawn_without_image_meta(self):
    """_create_image runs whether image_meta is None or a real dict."""
    self.create_image_called = False

    def fake_none(*args, **kwargs):
        return

    def fake_create_image(*args, **kwargs):
        self.create_image_called = True

    def fake_get_info(instance):
        return {'state': power_state.RUNNING}

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_get_guest_xml', fake_none)
    self.stubs.Set(conn, '_create_image', fake_create_image)
    self.stubs.Set(conn, '_create_domain_and_network', fake_none)
    self.stubs.Set(conn, 'get_info', fake_get_info)

    # No image meta at all.
    conn.spawn(self.context, instance, None, [], None)
    self.assertTrue(self.create_image_called)

    # Minimal image meta dict.
    conn.spawn(self.context,
               instance,
               {'id': instance['image_ref']},
               [],
               None)
    self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
    """Image.cache is only invoked for the root disk when the instance
    actually boots from an image, not from a volume.
    """
    self.cache_called_for_disk = False

    def fake_none(*args, **kwargs):
        return

    def fake_cache(*args, **kwargs):
        if kwargs.get('image_id') == 'my_fake_image':
            self.cache_called_for_disk = True

    def fake_get_info(instance):
        return {'state': power_state.RUNNING}

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_get_guest_xml', fake_none)

    self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
    self.stubs.Set(conn, '_create_domain_and_network', fake_none)
    self.stubs.Set(conn, 'get_info', fake_get_info)

    block_device_info = {'root_device_name': '/dev/vda',
                         'block_device_mapping': [
                            {'mount_device': 'vda',
                             'boot_index': 0}
                            ]
                         }

    # Volume-backed instance created without image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = ''
    instance_ref['root_device_name'] = '/dev/vda'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)

    conn.spawn(self.context, instance, None, [], None,
               block_device_info=block_device_info)
    self.assertFalse(self.cache_called_for_disk)

    # Booted from volume but with placeholder image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 'my_fake_image'
    instance_ref['root_device_name'] = '/dev/vda'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)

    conn.spawn(self.context, instance, None, [], None,
               block_device_info=block_device_info)
    self.assertFalse(self.cache_called_for_disk)

    # Booted from an image
    instance_ref['image_ref'] = 'my_fake_image'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)
    conn.spawn(self.context, instance, None, [], None)
    self.assertTrue(self.cache_called_for_disk)
def test_start_lxc_from_volume(self):
    """LXC spawn from an iSCSI volume sets up the container from the
    volume device and records the rootfs device in system_metadata.
    """
    self.flags(virt_type="lxc",
               group='libvirt')

    def check_setup_container(path, container_dir=None, use_cow=False):
        # The container must be built from the volume's device path.
        self.assertEqual(path, '/dev/path/to/dev')
        self.assertTrue(use_cow)
        return '/dev/nbd1'

    bdm = {
              'guest_format': None,
              'boot_index': 0,
              'mount_device': '/dev/sda',
              'connection_info': {
                  'driver_volume_type': 'iscsi',
                  'serial': 'afc1',
                  'data': {
                      'access_mode': 'rw',
                      'device_path': '/dev/path/to/dev',
                      'target_discovered': False,
                      'encrypted': False,
                      'qos_specs': None,
                      'target_iqn': 'iqn: volume-afc1',
                      'target_portal': 'ip: 3260',
                      'volume_id': 'afc1',
                      'target_lun': 1,
                      'auth_password': 'uj',
                      'auth_username': '47',
                      'auth_method': 'CHAP'
                  }
              },
              'disk_bus': 'scsi',
              'device_type': 'disk',
              'delete_on_termination': False
          }

    def _get(key, opt=None):
        return bdm.get(key, opt)

    def getitem(key):
        return bdm[key]

    def setitem(key, val):
        bdm[key] = val

    # Dict-like BDM mock backed by the plain dict above.
    bdm_mock = mock.MagicMock()
    bdm_mock.__getitem__.side_effect = getitem
    bdm_mock.__setitem__.side_effect = setitem
    bdm_mock.get = _get

    disk_mock = mock.MagicMock()
    disk_mock.source_path = '/dev/path/to/dev'

    block_device_info = {'block_device_mapping': [bdm_mock],
                         'root_device_name': '/dev/sda'}

    # Volume-backed instance created without image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = ''
    instance_ref['root_device_name'] = '/dev/sda'
    instance_ref['ephemeral_gb'] = 0
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance_ref['system_metadata']['image_disk_format'] = 'qcow2'
    inst_obj = objects.Instance(**instance_ref)

    flavor = inst_obj.get_flavor()
    flavor.extra_specs = {}

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(conn, '_create_images_and_backing'),
        mock.patch.object(conn, 'plug_vifs'),
        mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
        mock.patch.object(conn, '_create_domain'),
        mock.patch.object(conn, '_connect_volume'),
        mock.patch.object(conn, '_get_volume_config',
                          return_value=disk_mock),
        mock.patch.object(conn, 'get_info',
                          return_value={'state': power_state.RUNNING}),
        mock.patch('nova.virt.disk.api.setup_container',
                   side_effect=check_setup_container),
        mock.patch('nova.virt.disk.api.teardown_container'),
        mock.patch.object(objects.Instance, 'save'),
        mock.patch.object(objects.Flavor, 'get_by_id',
                          return_value=flavor)):

        conn.spawn(self.context, inst_obj, None, [], None,
                   network_info=[],
                   block_device_info=block_device_info)
        # setup_container's return value is persisted for teardown.
        self.assertEqual('/dev/nbd1',
                         inst_obj.system_metadata.get(
                             'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
    """spawn succeeds when the instance carries a PCI device whose
    address matches the libvirt node-device naming pattern.
    """
    def fake_none(*args, **kwargs):
        return None

    def fake_get_info(instance):
        return {'state': power_state.RUNNING}

    class FakeLibvirtPciDevice(object):
        # Note: 'dettach' (sic) is the actual libvirt API spelling.
        def dettach(self):
            return None

        def reset(self):
            return None

    def fake_node_device_lookup_by_name(address):
        # Raw strings so '\d' is a regex class, not a (deprecated)
        # string escape sequence.
        pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
                   % dict(hex=r'[\da-f]', oct=r'[0-8]'))
        pattern = re.compile(pattern)
        if pattern.match(address) is None:
            raise libvirt.libvirtError()
        return FakeLibvirtPciDevice()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_get_guest_xml', fake_none)
    self.stubs.Set(conn, '_create_image', fake_none)
    self.stubs.Set(conn, '_create_domain_and_network', fake_none)
    self.stubs.Set(conn, 'get_info', fake_get_info)

    conn._conn.nodeDeviceLookupByName = \
        fake_node_device_lookup_by_name

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 'my_fake_image'
    instance = objects.Instance(**instance_ref)
    instance = dict(instance.iteritems())
    instance['pci_devices'] = [{'address': '0000:00:00.0'}]

    conn.spawn(self.context, instance, None, [], None)
def test_chown_disk_config_for_instance(self):
    """_chown_disk_config_for_instance chowns disk.config to the
    current uid when the file exists under the instance path.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = copy.deepcopy(self.test_instance)
    instance['name'] = 'test_name'
    self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
    fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
    os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
    fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())

    self.mox.ReplayAll()
    conn._chown_disk_config_for_instance(instance)
def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
    """Helper: run _create_image with a fake image backend and verify
    the cached root/ephemeral filenames and sizes.

    :param os_type: os_type set on the instance
    :param filename: expected ephemeral cache filename
    :param mkfs: register a per-os_type mkfs command before running
    """
    gotFiles = []

    def fake_image(self, instance, name, image_type=''):
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name, is_block_dev=False):
                self.path = os.path.join(instance['name'], name)
                self.is_block_dev = is_block_dev

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def cache(self, fetch_func, filename, size=None,
                      *args, **kwargs):
                # Record what _create_image asked to cache.
                gotFiles.append({'filename': filename,
                                 'size': size})

            def snapshot(self, name):
                pass

        return FakeImage(instance, name)

    def fake_none(*args, **kwargs):
        return

    def fake_get_info(instance):
        return {'state': power_state.RUNNING}

    # Stop 'libvirt_driver._create_image' touching filesystem
    self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                   fake_image)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    instance['os_type'] = os_type

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_get_guest_xml', fake_none)
    self.stubs.Set(conn, '_create_domain_and_network', fake_none)
    self.stubs.Set(conn, 'get_info', fake_get_info)
    if mkfs:
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {os_type: 'mkfs.ext3 --label %(fs_label)s %(target)s'})

    image_meta = {'id': instance['image_ref']}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        None,
                                        image_meta)
    # NOTE(review): passes the 'context' module, not self.context, while
    # the next call uses self.context — looks inconsistent; confirm
    # _create_image never dereferences its context argument here.
    conn._create_image(context, instance, disk_info['mapping'])
    conn._get_guest_xml(self.context, instance, None,
                        disk_info, image_meta)

    wantFiles = [
        {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
         'size': 10 * units.Gi},
        {'filename': filename,
         'size': 20 * units.Gi},
        ]
    self.assertEqual(gotFiles, wantFiles)
def test_create_image_plain_os_type_blank(self):
    """Blank os_type falls back to the default ephemeral filename."""
    self._test_create_image_plain(os_type='',
                                  filename='ephemeral_20_default',
                                  mkfs=False)
+
def test_create_image_plain_os_type_none(self):
    """None os_type falls back to the default ephemeral filename."""
    self._test_create_image_plain(os_type=None,
                                  filename='ephemeral_20_default',
                                  mkfs=False)
+
def test_create_image_plain_os_type_set_no_fs(self):
    """os_type without a registered mkfs command uses the default name."""
    self._test_create_image_plain(os_type='test',
                                  filename='ephemeral_20_default',
                                  mkfs=False)
+
def test_create_image_plain_os_type_set_with_fs(self):
    """os_type with a registered mkfs command gets an os-typed name."""
    self._test_create_image_plain(os_type='test',
                                  filename='ephemeral_20_test',
                                  mkfs=True)
+
def test_create_image_with_swap(self):
    """_create_image caches a swap_<MB> file when flavor swap is set."""
    gotFiles = []

    def fake_image(self, instance, name, image_type=''):
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name, is_block_dev=False):
                self.path = os.path.join(instance['name'], name)
                self.is_block_dev = is_block_dev

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def cache(self, fetch_func, filename, size=None,
                      *args, **kwargs):
                # Record what _create_image asked to cache.
                gotFiles.append({'filename': filename,
                                 'size': size})

            def snapshot(self, name):
                pass

        return FakeImage(instance, name)

    def fake_none(*args, **kwargs):
        return

    def fake_get_info(instance):
        return {'state': power_state.RUNNING}

    # Stop 'libvirt_driver._create_image' touching filesystem
    self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                   fake_image)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    # Turn on some swap to exercise that codepath in _create_image
    instance_ref['system_metadata']['instance_type_swap'] = 500
    instance = objects.Instance(**instance_ref)

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_get_guest_xml', fake_none)
    self.stubs.Set(conn, '_create_domain_and_network', fake_none)
    self.stubs.Set(conn, 'get_info', fake_get_info)

    image_meta = {'id': instance['image_ref']}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        None,
                                        image_meta)
    conn._create_image(context, instance, disk_info['mapping'])
    conn._get_guest_xml(self.context, instance, None,
                        disk_info, image_meta)

    wantFiles = [
        {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
         'size': 10 * units.Gi},
        {'filename': 'ephemeral_20_default',
         'size': 20 * units.Gi},
        {'filename': 'swap_500',
         'size': 500 * units.Mi},
        ]
    self.assertEqual(gotFiles, wantFiles)
@mock.patch.object(utils, 'execute')
def test_create_ephemeral_specified_fs(self, mock_exec):
    """specified_fs ('ext4') overrides default_ephemeral_format."""
    self.flags(default_ephemeral_format='ext3')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20,
                           specified_fs='ext4')
    mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                      'myVol', '/dev/something',
                                      run_as_root=True)
def test_create_ephemeral_specified_fs_not_valid(self):
    """An unsupported guest_format in the BDM raises InvalidBDMFormat."""
    CONF.set_override('default_ephemeral_format', 'ext4')
    ephemerals = [{'device_type': 'disk',
                   'disk_bus': 'virtio',
                   'device_name': '/dev/vdb',
                   'guest_format': 'dummy',
                   'size': 1}]
    block_device_info = {
            'ephemerals': ephemerals}
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    image_meta = {'id': instance['image_ref']}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        None,
                                        image_meta)
    # Drop the default ephemeral so only the BDM-defined one remains.
    disk_info['mapping'].pop('disk.local')

    with contextlib.nested(
            mock.patch.object(utils, 'execute'),
            mock.patch.object(conn, 'get_info'),
            mock.patch.object(conn, '_create_domain_and_network')):
        self.assertRaises(exception.InvalidBDMFormat, conn._create_image,
                          context, instance, disk_info['mapping'],
                          block_device_info=block_device_info)
def test_create_ephemeral_default(self):
    """Default ephemeral format is ext3 for a linux block device."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext3', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()
    conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20)
def test_create_ephemeral_with_conf(self):
    """default_ephemeral_format config drives the mkfs filesystem type."""
    CONF.set_override('default_ephemeral_format', 'ext4')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()
    conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_arbitrary(self):
    """A per-os_type _MKFS_COMMAND template is used when registered."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()
    conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_swap_default(self):
    """_create_swap runs mkswap on the target without root."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkswap', '/dev/something', run_as_root=False)
    self.mox.ReplayAll()

    conn._create_swap('/dev/something', 1, max_size=20)
def test_get_console_output_file(self):
    """File-backed console output is truncated to MAX_CONSOLE_BYTES,
    keeping the tail of the log.
    """
    fake_libvirt_utils.files['console.log'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        console_log = '%s/console.log' % (console_dir)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            # Shrink the cap so truncation is observable, restore after.
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            output = conn.get_console_output(self.context, instance)
        finally:
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        # Last 5 bytes of '01234567890'.
        self.assertEqual('67890', output)
def test_get_console_output_pty(self):
    """PTY-backed console output is flushed, appended and truncated to
    MAX_CONSOLE_BYTES like the file-backed case.
    """
    fake_libvirt_utils.files['pty'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        pty_file = '%s/fake_pty' % (console_dir)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='pty'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % pty_file

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        def _fake_flush(self, fake_pty):
            return 'foo'

        def _fake_append_to_file(self, data, fpath):
            return 'pty'

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
        libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
        libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            # Shrink the cap so truncation is observable, restore after.
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            output = conn.get_console_output(self.context, instance)
        finally:
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        # Last 5 bytes of '01234567890'.
        self.assertEqual('67890', output)
def test_get_host_ip_addr(self):
    """get_host_ip_addr returns the configured my_ip."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ip = conn.get_host_ip_addr()
    self.assertEqual(ip, CONF.my_ip)
def test_broken_connection(self):
    """_test_connection reports False for each remote/RPC libvirt error
    code/domain pair that signals a dead connection.
    """
    for (error, domain) in (
            (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
            (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
            (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(conn, "_wrapped_conn")
        self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
        self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
        self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")

        conn._wrapped_conn.getLibVersion().AndRaise(
                libvirt.libvirtError("fake failure"))

        libvirt.libvirtError.get_error_code().AndReturn(error)
        libvirt.libvirtError.get_error_domain().AndReturn(domain)

        self.mox.ReplayAll()

        self.assertFalse(conn._test_connection(conn._wrapped_conn))

        # Reset mox stubs before the next (error, domain) iteration.
        self.mox.UnsetStubs()
def test_command_with_broken_connection(self):
    """A libvirt error on connect surfaces as HypervisorUnavailable."""
    self.mox.UnsetStubs()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(libvirt, 'openAuth',
                          side_effect=libvirt.libvirtError("fake")),
        mock.patch.object(libvirt.libvirtError, "get_error_code"),
        mock.patch.object(libvirt.libvirtError, "get_error_domain"),
        mock.patch.object(conn, '_set_host_enabled')):
        self.assertRaises(exception.HypervisorUnavailable,
                          conn.get_num_instances)
def test_broken_connection_disable_service(self):
    """A connection-close event disables the host with a reason."""
    self.mox.UnsetStubs()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn._init_events_pipe()
    with contextlib.nested(
        mock.patch.object(conn, '_set_host_enabled')):
        conn._close_callback(conn._wrapped_conn, 'ERROR!', '')
        conn._dispatch_events()
        conn._set_host_enabled.assert_called_once_with(
            False,
            disable_reason=u'Connection to libvirt lost: ERROR!')
def test_service_resume_after_broken_connection(self):
    """A previously-disabled service is re-enabled once the libvirt
    connection can be re-established.
    """
    self.mox.UnsetStubs()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = True
    with contextlib.nested(
        mock.patch.object(libvirt, 'openAuth',
                          return_value=mock.MagicMock()),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        conn.get_num_instances()
        # Split assertions so a failure pinpoints which field is wrong;
        # compare the reason with == rather than identity ('is') against
        # a string literal, which is implementation-dependent.
        self.assertFalse(service_mock.disabled)
        self.assertEqual('None', service_mock.disabled_reason)
def test_broken_connection_no_wrapped_conn(self):
    # Tests that calling _close_callback when _wrapped_conn is None
    # is a no-op, i.e. set_host_enabled won't be called.
    self.mox.UnsetStubs()
    # conn._wrapped_conn will be None since we never call libvirt.openAuth
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    # create our mock connection that libvirt will send to the callback
    mock_failed_conn = mock.MagicMock()
    mock_failed_conn.__getitem__.return_value = True
    # nothing should happen when calling _close_callback since
    # _wrapped_conn is None in the driver
    conn._init_events_pipe()
    conn._close_callback(mock_failed_conn, reason=None, opaque=None)
    conn._dispatch_events()
def test_immediate_delete(self):
    """destroy is a no-op when the domain no longer exists."""
    def fake_lookup_by_name(instance_name):
        raise exception.InstanceNotFound(instance_id=instance_name)

    def fake_delete_instance_files(instance):
        pass

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, '_delete_instance_files',
                   fake_delete_instance_files)

    instance = objects.Instance(**self.test_instance)
    conn.destroy(self.context, instance, {})
def _test_destroy_removes_disk(self, volume_fail=False):
    """Helper: destroy with destroy_disks=True disconnects volumes and
    deletes instance files; a VolumeNotFound during disconnect must not
    abort the teardown.

    :param volume_fail: make _disconnect_volume raise VolumeNotFound
    """
    instance = {"name": "instancename", "id": "42",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
                "cleaned": 0, 'info_cache': None, 'security_groups': []}
    vol = {'block_device_mapping': [
           {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             '_undefine_domain')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(instance)
    self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
    driver.block_device_info_get_mapping(vol
                                         ).AndReturn(vol['block_device_mapping'])
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             "_disconnect_volume")
    if volume_fail:
        # Teardown must survive a missing volume.
        libvirt_driver.LibvirtDriver._disconnect_volume(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
            AndRaise(exception.VolumeNotFound('vol'))
    else:
        libvirt_driver.LibvirtDriver._disconnect_volume(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             'delete_instance_files')
    (libvirt_driver.LibvirtDriver.delete_instance_files(mox.IgnoreArg()).
     AndReturn(True))
    libvirt_driver.LibvirtDriver._undefine_domain(instance)

    # Start test
    self.mox.ReplayAll()

    def fake_destroy(instance):
        pass

    def fake_os_path_exists(path):
        return True

    def fake_unplug_vifs(instance, network_info, ignore_errors=False):
        pass

    def fake_unfilter_instance(instance, network_info):
        pass

    def fake_obj_load_attr(self, attrname):
        if not hasattr(self, attrname):
            self[attrname] = {}

    def fake_save(self, context):
        pass

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.stubs.Set(conn, '_destroy', fake_destroy)
    self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
    self.stubs.Set(conn.firewall_driver,
                   'unfilter_instance', fake_unfilter_instance)
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    self.stubs.Set(objects.Instance, 'fields',
                   {'id': int, 'uuid': str, 'cleaned': int})
    self.stubs.Set(objects.Instance, 'obj_load_attr',
                   fake_obj_load_attr)
    self.stubs.Set(objects.Instance, 'save', fake_save)

    conn.destroy(self.context, instance, [], vol)
+ def test_destroy_removes_disk(self):
+ self._test_destroy_removes_disk(volume_fail=False)
+
+ def test_destroy_removes_disk_volume_fails(self):
+ self._test_destroy_removes_disk(volume_fail=True)
+
    def test_destroy_not_removes_disk(self):
        """destroy() with destroy_disks=False only undefines the domain;
        mox verifies _undefine_domain is the sole driver-level call.
        """
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_undefine_domain')
        libvirt_driver.LibvirtDriver._undefine_domain(instance)

        # Start test
        self.mox.ReplayAll()

        def fake_destroy(instance):
            pass

        def fake_os_path_exists(path):
            return True

        def fake_unplug_vifs(instance, network_info, ignore_errors=False):
            pass

        def fake_unfilter_instance(instance, network_info):
            pass

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.stubs.Set(conn, '_destroy', fake_destroy)
        self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
        self.stubs.Set(conn.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        # destroy_disks=False: the disk-removal expectations are absent.
        conn.destroy(self.context, instance, [], None, False)
+
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
    def test_destroy_lxc_calls_teardown_container(self, mock_look_up,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        """Destroying an LXC instance must tear down the container once
        the domain reports SHUTDOWN, then run cleanup().
        """
        self.flags(virt_type='lxc', group='libvirt')
        fake_domain = FakeVirtDomain()

        def destroy_side_effect(*args, **kwargs):
            # Flip the domain's reported power state so the driver's
            # destroy wait loop terminates.
            fake_domain._info[0] = power_state.SHUTDOWN

        with mock.patch.object(fake_domain, 'destroy',
               side_effect=destroy_side_effect) as mock_domain_destroy:
            mock_look_up.return_value = fake_domain
            instance = fake_instance.fake_instance_obj(self.context)

            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            network_info = []
            conn.destroy(self.context, instance, network_info, None, False)

            # The domain is looked up twice: once to destroy, once to
            # confirm the state change.
            mock_look_up.assert_has_calls([mock.call(instance.name),
                                           mock.call(instance.name)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False,
                                                 None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
+ def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
+ mock_look_up, mock_teardown_container, mock_cleanup):
+ self.flags(virt_type='lxc', group='libvirt')
+ instance = fake_instance.fake_instance_obj(self.context)
+ inf_exception = exception.InstanceNotFound(instance_id=instance.name)
+ mock_look_up.side_effect = inf_exception
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = []
+ conn.destroy(self.context, instance, network_info, None, False)
+
+ mock_look_up.assert_has_calls([mock.call(instance.name),
+ mock.call(instance.name)])
+ mock_teardown_container.assert_called_once_with(instance)
+ mock_cleanup.assert_called_once_with(self.context, instance,
+ network_info, None, False,
+ None)
+
    def test_reboot_different_ids(self):
        """Soft reboot succeeds when the domain ID changes after
        shutdown, so the driver (re)creates the domain. Mox
        record/replay: info()/ID() are consumed in the recorded order.
        """
        class FakeLoopingCall:
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_create_called = False

        # Mock domain
        mock_domain = self.mox.CreateMock(libvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
        # A different ID after shutdown signals the reboot completed.
        mock_domain.ID().AndReturn('some_other_fake_id')

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        conn.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)
+
    def test_reboot_same_ids(self):
        """Soft reboot falls back to hard reboot when the domain ID is
        unchanged after the soft-reboot window expires (mox).
        """
        class FakeLoopingCall:
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain
        mock_domain = self.mox.CreateMock(libvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
        # Same ID as before: the soft reboot did not take effect.
        mock_domain.ID().AndReturn('some_fake_id')

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock_domain

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        def fake_sleep(interval):
            pass

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(greenthread, 'sleep', fake_sleep)
        self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        conn.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)
+
    def test_soft_reboot_libvirt_exception(self):
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')

        # setup mocks
        mock_domain = self.mox.CreateMock(libvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown().AndRaise(libvirt.libvirtError('Err'))

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        context = None
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        network_info = []

        self.mox.StubOutWithMock(conn, '_lookup_by_name')
        conn._lookup_by_name(instance['name']).AndReturn(mock_domain)
        self.mox.StubOutWithMock(conn, '_hard_reboot')
        # The fallback under test: _hard_reboot must be invoked exactly so.
        conn._hard_reboot(context, instance, network_info, None)

        self.mox.ReplayAll()

        conn.reboot(context, instance, network_info, 'SOFT')
+
    def _test_resume_state_on_host_boot_with_state(self, state):
        """Drive resume_state_on_host_boot() with a domain reporting
        ``state`` and count hard reboots: the ignored states
        (RUNNING/SUSPENDED/NOSTATE/PAUSED) must not trigger one,
        everything else must trigger exactly one.
        """
        called = {'count': 0}
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.info().AndReturn([state, None, None, None, None])
        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_hard_reboot(*args):
            called['count'] += 1

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
        instance_details = {"name": "instancename", "id": 1,
                            "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        instance = fake_instance.fake_instance_obj(
            self.context, **instance_details)
        network_info = _fake_network_info(self.stubs, 1)

        conn.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)

        ignored_states = (power_state.RUNNING,
                          power_state.SUSPENDED,
                          power_state.NOSTATE,
                          power_state.PAUSED)
        if state in ignored_states:
            self.assertEqual(called['count'], 0)
        else:
            self.assertEqual(called['count'], 1)
+
    def test_resume_state_on_host_boot_with_running_state(self):
        # RUNNING is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
+
    def test_resume_state_on_host_boot_with_suspended_state(self):
        # SUSPENDED is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
+
    def test_resume_state_on_host_boot_with_paused_state(self):
        # PAUSED is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
+
    def test_resume_state_on_host_boot_with_nostate(self):
        # NOSTATE is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
+
+ def test_resume_state_on_host_boot_with_shutdown_state(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
+
    def test_resume_state_on_host_boot_with_crashed_state(self):
        # CRASHED is not ignored: exactly one hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
+
+ def test_resume_state_on_host_boot_with_instance_not_found_on_driver(self):
+ called = {'count': 0}
+ instance_details = {'name': 'test'}
+ instance = fake_instance.fake_instance_obj(
+ self.context, **instance_details)
+
+ def fake_lookup_by_name(instance_name):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ def fake_hard_reboot(*args):
+ called['count'] += 1
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
+ conn.resume_state_on_host_boot(self.context, instance, network_info=[],
+ block_device_info=None)
+
+ self.assertEqual(called['count'], 1)
+
    def test_hard_reboot(self):
        """Hard reboot destroys the domain, regenerates the guest XML
        (written to disk), re-creates image backing files and finally
        re-creates domain+network. Mox record/replay: expectations below
        mirror _hard_reboot's call order.
        """
        called = {'count': 0}
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        block_device_info = None

        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(conn, '_destroy')
        self.mox.StubOutWithMock(conn, '_get_instance_disk_info')
        self.mox.StubOutWithMock(conn, '_get_guest_xml')
        self.mox.StubOutWithMock(conn, '_create_images_and_backing')
        self.mox.StubOutWithMock(conn, '_create_domain_and_network')

        def fake_get_info(instance_name):
            # First poll reports SHUTDOWN, every later poll RUNNING, so
            # the reboot wait loop completes.
            called['count'] += 1
            if called['count'] == 1:
                state = power_state.SHUTDOWN
            else:
                state = power_state.RUNNING
            return dict(state=state)

        self.stubs.Set(conn, 'get_info', fake_get_info)

        conn._destroy(instance)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, block_device_info)

        system_meta = utils.instance_sys_meta(instance)
        image_meta = utils.get_image_from_system_metadata(system_meta)

        conn._get_guest_xml(self.context, instance, network_info, disk_info,
                            image_meta=image_meta,
                            block_device_info=block_device_info,
                            write_to_disk=True).AndReturn(dummyxml)
        disk_info_json = '[{"virt_disk_size": 2}]'
        conn._get_instance_disk_info(instance["name"], dummyxml,
                                     block_device_info).AndReturn(disk_info_json)
        conn._create_images_and_backing(self.context, instance,
                                libvirt_utils.get_instance_path(instance),
                                disk_info_json)
        conn._create_domain_and_network(self.context, dummyxml, instance,
                                        network_info, block_device_info,
                                        reboot=True, vifs_already_plugged=True)
        self.mox.ReplayAll()

        conn._hard_reboot(self.context, instance, network_info,
                          block_device_info)
+
    @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
    @mock.patch('nova.pci.manager.get_instance_pci_devs')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot_does_not_call_glance_show(self,
            mock_destroy, mock_get_disk_info, mock_get_guest_config,
            mock_get_instance_path, mock_write_to_file,
            mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
            mock_get_instance_pci_devs, mock_looping_call):
        """For a hard reboot, we shouldn't need an additional call to glance
        to get the image metadata.

        This is important for automatically spinning up instances on a
        host-reboot, since we won't have a user request context that'll allow
        the Glance request to go through. We have to rely on the cached image
        metadata, instead.

        https://bugs.launchpad.net/nova/+bug/1339386
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        instance = objects.Instance(**self.test_instance)

        network_info = mock.MagicMock()
        block_device_info = mock.MagicMock()
        mock_get_disk_info.return_value = {}
        mock_get_guest_config.return_value = mock.MagicMock()
        mock_get_instance_path.return_value = '/foo'
        mock_looping_call.return_value = mock.MagicMock()
        # Spy on the image API: the assertion below is only that it was
        # never consulted during the reboot.
        conn._image_api = mock.MagicMock()

        conn._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        self.assertFalse(conn._image_api.get.called)
+
    def test_power_on(self):
        """Check that the regenerated guest XML uses the disk bus from the
        instance's cached image metadata, falling back to the image's own
        'hw_disk_bus' property when the instance carries none.

        NOTE(review): despite the name, this drives _hard_reboot, not a
        public power_on call — presumably power_on delegates to it;
        confirm against the driver.
        """

        def _check_xml_bus(name, xml, block_info):
            # Stands in for _get_instance_disk_info so we can inspect the
            # XML the driver generated.
            tree = etree.fromstring(xml)
            got_disk_targets = tree.findall('./devices/disk/target')
            system_meta = utils.instance_sys_meta(instance)
            image_meta = utils.get_image_from_system_metadata(system_meta)
            want_device_bus = image_meta.get('hw_disk_bus')
            if not want_device_bus:
                want_device_bus = self.fake_img['properties']['hw_disk_bus']
            got_device_bus = got_disk_targets[0].get('bus')
            self.assertEqual(got_device_bus, want_device_bus)

        def fake_get_info(instance_name):
            called['count'] += 1
            if called['count'] == 1:
                state = power_state.SHUTDOWN
            else:
                state = power_state.RUNNING
            return dict(state=state)

        def _get_inst(with_meta=True):
            inst_ref = self.test_instance
            inst_ref['uuid'] = uuidutils.generate_uuid()
            if with_meta:
                inst_ref['system_metadata']['image_hw_disk_bus'] = 'ide'
            instance = objects.Instance(**inst_ref)
            instance['image_ref'] = '70a599e0-31e7-49b7-b260-868f221a761e'
            return instance

        called = {'count': 0}
        self.fake_img = {'id': '70a599e0-31e7-49b7-b260-868f221a761e',
                         'name': 'myfakeimage',
                         'created_at': '',
                         'updated_at': '',
                         'deleted_at': None,
                         'deleted': False,
                         'status': 'active',
                         'is_public': False,
                         'container_format': 'bare',
                         'disk_format': 'qcow2',
                         'size': '74185822',
                         'properties': {'hw_disk_bus': 'ide'}}

        instance = _get_inst()
        flavor = instance.get_flavor()
        flavor.extra_specs = {}
        network_info = _fake_network_info(self.stubs, 1)
        block_device_info = None
        image_service_mock = mock.Mock()
        image_service_mock.show.return_value = self.fake_img

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(conn, '_destroy', return_value=None),
                mock.patch.object(conn, '_create_images_and_backing'),
                mock.patch.object(conn, '_create_domain_and_network'),
                mock.patch.object(objects.Flavor, 'get_by_id',
                                  return_value = flavor),
                mock.patch.object(objects.Instance, 'save')):
            conn.get_info = fake_get_info
            conn._get_instance_disk_info = _check_xml_bus
            # First pass: bus comes from the instance's cached metadata.
            conn._hard_reboot(self.context, instance, network_info,
                              block_device_info)

            # Second pass: no cached metadata, bus falls back to the image.
            instance = _get_inst(with_meta=False)
            conn._hard_reboot(self.context, instance, network_info,
                              block_device_info)
+
    def _test_clean_shutdown(self, seconds_to_shutdown,
                             timeout, retry_interval,
                             shutdown_attempts, succeeds):
        """Simulate a guest that powers off after ``seconds_to_shutdown``
        and verify _clean_shutdown's return value and the number of
        shutdown requests it issued. Mox record/replay: one info() per
        polling second, plus a repeated shutdown() each time the retry
        countdown reaches zero.
        """
        self.stubs.Set(time, 'sleep', lambda x: None)
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        shutdown_count = []

        def count_shutdowns():
            shutdown_count.append("shutdown")

        # Mock domain
        mock_domain = self.mox.CreateMock(libvirt.virDomain)

        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.shutdown().WithSideEffects(count_shutdowns)

        retry_countdown = retry_interval
        for x in xrange(min(seconds_to_shutdown, timeout)):
            mock_domain.info().AndReturn(
                (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
            if retry_countdown == 0:
                mock_domain.shutdown().WithSideEffects(count_shutdowns)
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1

        if seconds_to_shutdown < timeout:
            # The guest makes it to SHUTDOWN within the window.
            mock_domain.info().AndReturn(
                (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, '_create_domain', fake_create_domain)
        result = conn._clean_shutdown(instance, timeout, retry_interval)

        self.assertEqual(succeeds, result)
        self.assertEqual(shutdown_attempts, len(shutdown_count))
+
+ def test_clean_shutdown_first_time(self):
+ self._test_clean_shutdown(seconds_to_shutdown=2,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=1,
+ succeeds=True)
+
+ def test_clean_shutdown_with_retry(self):
+ self._test_clean_shutdown(seconds_to_shutdown=4,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=2,
+ succeeds=True)
+
+ def test_clean_shutdown_failure(self):
+ self._test_clean_shutdown(seconds_to_shutdown=6,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=2,
+ succeeds=False)
+
+ def test_clean_shutdown_no_wait(self):
+ self._test_clean_shutdown(seconds_to_shutdown=6,
+ timeout=0,
+ retry_interval=3,
+ shutdown_attempts=1,
+ succeeds=False)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FakeVirtDomain, 'attachDevice')
+ @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_attach_sriov_ports(self,
+ mock_get_image_metadata,
+ mock_ID,
+ mock_attachDevice,
+ mock_flavor):
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._attach_sriov_ports(self.context, instance, domain, network_info)
+ mock_get_image_metadata.assert_called_once_with(self.context,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_attachDevice.called)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FakeVirtDomain, 'attachDevice')
+ @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_attach_sriov_ports_with_info_cache(self,
+ mock_get_image_metadata,
+ mock_ID,
+ mock_attachDevice,
+ mock_flavor):
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ instance.info_cache = objects.InstanceInfoCache(
+ network_info=network_info)
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._attach_sriov_ports(self.context, instance, domain, None)
+ mock_get_image_metadata.assert_called_once_with(self.context,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_attachDevice.called)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_has_min_version', return_value=True)
+ @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_detach_sriov_ports(self,
+ mock_get_image_metadata,
+ mock_detachDeviceFlags,
+ mock_has_min_version,
+ mock_flavor):
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ instance.info_cache = objects.InstanceInfoCache(
+ network_info=network_info)
+
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._detach_sriov_ports(instance, domain)
+ mock_get_image_metadata.assert_called_once_with(mock.ANY,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_detachDeviceFlags.called)
+
    def test_resume(self):
        """resume() recreates the domain from the previously saved XML
        with VIFs already plugged, then reattaches PCI devices.
        """
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        block_device_info = None
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(conn, '_get_existing_domain_xml',
                                  return_value=dummyxml),
                mock.patch.object(conn, '_create_domain_and_network',
                                  return_value='fake_dom'),
                mock.patch.object(conn, '_attach_pci_devices'),
                mock.patch.object(pci_manager, 'get_instance_pci_devs',
                                  return_value='fake_pci_devs'),
        ) as (_get_existing_domain_xml, _create_domain_and_network,
              _attach_pci_devices, get_instance_pci_devs):
            conn.resume(self.context, instance, network_info,
                        block_device_info)
            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                            network_info, block_device_info)])
            _create_domain_and_network.assert_has_calls([mock.call(
                                        self.context, dummyxml,
                                        instance, network_info,
                                        block_device_info=block_device_info,
                                        vifs_already_plugged=True)])
            _attach_pci_devices.assert_has_calls([mock.call('fake_dom',
                                                 'fake_pci_devs')])
+
    def test_destroy_undefines(self):
        """destroy() undefines the domain via undefineFlags(1) once the
        guest is powered off, then deletes instance files (mox).
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndReturn(1)

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            return {'state': power_state.SHUTDOWN, 'id': -1}

        def fake_delete_instance_files(instance):
            return None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        self.stubs.Set(conn, '_delete_instance_files',
                       fake_delete_instance_files)

        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(self.context, instance, [])
+
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ def test_cleanup_rbd(self, mock_driver):
+ driver = mock_driver.return_value
+ driver.cleanup_volumes = mock.Mock()
+ fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._cleanup_rbd(fake_instance)
+
+ driver.cleanup_volumes.assert_called_once_with(fake_instance)
+
    def test_destroy_undefines_no_undefine_flags(self):
        """When undefineFlags(1) raises libvirtError, destroy() falls
        back to a plain undefine() (mox).
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
        mock.undefine()

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            return {'state': power_state.SHUTDOWN, 'id': -1}

        def fake_delete_instance_files(instance):
            return None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        self.stubs.Set(conn, '_delete_instance_files',
                       fake_delete_instance_files)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(self.context, instance, [])
+
    def test_destroy_undefines_no_attribute_with_managed_save(self):
        """When undefineFlags is unsupported (AttributeError) and a
        managed-save image exists, destroy() removes it before the plain
        undefine() (mox).
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndReturn(True)
        mock.managedSaveRemove(0)
        mock.undefine()

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            return {'state': power_state.SHUTDOWN, 'id': -1}

        def fake_delete_instance_files(instance):
            return None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        self.stubs.Set(conn, '_delete_instance_files',
                       fake_delete_instance_files)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(self.context, instance, [])
+
    def test_destroy_undefines_no_attribute_no_managed_save(self):
        """AttributeError from both undefineFlags and hasManagedSaveImage
        still ends in a plain undefine() (mox).
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndRaise(AttributeError())
        mock.undefine()

        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_info(instance_name):
            return {'state': power_state.SHUTDOWN, 'id': -1}

        def fake_delete_instance_files(instance):
            return None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(conn, 'get_info', fake_get_info)
        self.stubs.Set(conn, '_delete_instance_files',
                       fake_delete_instance_files)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        conn.destroy(self.context, instance, [])
+
    def test_destroy_timed_out(self):
        """A libvirt operation-timeout during domain destroy surfaces as
        InstancePowerOffFailure (mox).
        """
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        def fake_get_error_code(self):
            return libvirt.VIR_ERR_OPERATION_TIMEOUT

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        self.stubs.Set(libvirt.libvirtError, 'get_error_code',
                       fake_get_error_code)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        self.assertRaises(exception.InstancePowerOffFailure,
                          conn.destroy, self.context, instance, [])
+
    def test_private_destroy_not_found(self):
        """_destroy() swallows VIR_ERR_NO_DOMAIN raised mid-destroy."""
        ex = fakelibvirt.make_libvirtError(
                libvirt.libvirtError,
                "No such domain",
                error_code=libvirt.VIR_ERR_NO_DOMAIN)
        mock = self.mox.CreateMock(libvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(ex)
        mock.info().AndRaise(ex)
        self.mox.ReplayAll()

        def fake_lookup_by_name(instance_name):
            return mock

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
        instance = {"name": "instancename", "id": "instanceid",
                    "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        conn._destroy(instance)
+
    def test_undefine_domain_with_not_found_instance(self):
        """_undefine_domain() tolerates a domain that no longer exists."""
        def fake_lookup(instance_name):
            raise libvirt.libvirtError("not found")

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
        self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
        libvirt.libvirtError.get_error_code().AndReturn(
            libvirt.VIR_ERR_NO_DOMAIN)

        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = {'name': 'test'}

        # NOTE(wenjianhn): verifies undefine doesn't raise if the
        # instance disappears
        conn._undefine_domain(instance)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_disk_over_committed_size_total(self, mock_list):
+ # Ensure destroy calls managedSaveRemove for saved instance.
+ class DiagFakeDomain(object):
+ def __init__(self, name):
+ self._name = name
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return self._name
+
+ def UUIDString(self):
+ return "19479fee-07a5-49bb-9138-d3738280d63c"
+
+ def XMLDesc(self, flags):
+ return "<domain/>"
+
+ mock_list.return_value = [
+ DiagFakeDomain("instance0000001"),
+ DiagFakeDomain("instance0000002")]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ fake_disks = {'instance0000001':
+ [{'type': 'qcow2', 'path': '/somepath/disk1',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk1',
+ 'disk_size': '83886080',
+ 'over_committed_disk_size': '10653532160'}],
+ 'instance0000002':
+ [{'type': 'raw', 'path': '/somepath/disk2',
+ 'virt_disk_size': '0',
+ 'backing_file': '/somepath/disk2',
+ 'disk_size': '10737418240',
+ 'over_committed_disk_size': '0'}]}
+
+ def get_info(instance_name, xml, **kwargs):
+ return jsonutils.dumps(fake_disks.get(instance_name))
+
+ with mock.patch.object(drvr,
+ "_get_instance_disk_info") as mock_info:
+ mock_info.side_effect = get_info
+
+ result = drvr._get_disk_over_committed_size_total()
+ self.assertEqual(result, 10653532160)
+ mock_list.assert_called_with()
+ mock_info.assert_called()
+
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_list_instance_domains")
    def test_disk_over_committed_size_total_eperm(self, mock_list):
        # NOTE: instances whose disk info raises EACCES are skipped; the
        # total must include only the readable instance. (The old comment
        # here about managedSaveRemove was a copy-paste leftover.)
        class DiagFakeDomain(object):
            def __init__(self, name):
                self._name = name

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

            def XMLDesc(self, flags):
                return "<domain/>"

        mock_list.return_value = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002")]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_disks = {'instance0000001':
                      [{'type': 'qcow2', 'path': '/somepath/disk1',
                        'virt_disk_size': '10737418240',
                        'backing_file': '/somepath/disk1',
                        'disk_size': '83886080',
                        'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                      [{'type': 'raw', 'path': '/somepath/disk2',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk2',
                        'disk_size': '10737418240',
                        'over_committed_disk_size': '21474836480'}]}

        def side_effect(name, dom):
            if name == 'instance0000001':
                # Simulate an unreadable disk for the first instance.
                raise OSError(errno.EACCES, 'Permission denied')
            if name == 'instance0000002':
                return jsonutils.dumps(fake_disks.get(name))
        get_disk_info = mock.Mock()
        get_disk_info.side_effect = side_effect
        drvr._get_instance_disk_info = get_disk_info

        result = drvr._get_disk_over_committed_size_total()
        self.assertEqual(21474836480, result)
        mock_list.assert_called_with()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_list_instance_domains",
+ return_value=[mock.MagicMock(name='foo')])
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
+ side_effect=exception.VolumeBDMPathNotFound(path='bar'))
+ def test_disk_over_committed_size_total_bdm_not_found(self,
+ mock_get_disk_info,
+ mock_list_domains):
+ # Tests that we handle VolumeBDMPathNotFound gracefully.
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ # No usable disk info at all, so the total defaults to zero.
+ self.assertEqual(0, drvr._get_disk_over_committed_size_total())
+
+ def test_cpu_info(self):
+ # Stub out host capabilities with a fixed CPU model/topology and
+ # verify the JSON payload produced by _get_cpu_info.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigCPU()
+ cpu.model = "Opteron_G4"
+ cpu.vendor = "AMD"
+ cpu.arch = arch.X86_64
+
+ cpu.cores = 2
+ cpu.threads = 1
+ cpu.sockets = 4
+
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = vm_mode.HVM
+ guest.arch = arch.X86_64
+ guest.domtype = ["kvm"]
+ caps.guests.append(guest)
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = vm_mode.HVM
+ guest.arch = arch.I686
+ guest.domtype = ["kvm"]
+ caps.guests.append(guest)
+
+ return caps
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ get_host_capabilities_stub)
+
+ want = {"vendor": "AMD",
+ "features": ["extapic", "3dnow"],
+ "model": "Opteron_G4",
+ "arch": arch.X86_64,
+ "topology": {"cores": 2, "threads": 1, "sockets": 4}}
+ got = jsonutils.loads(conn._get_cpu_info())
+ self.assertEqual(want, got)
+
+ def test_get_pcidev_info(self):
+ # Verify _get_pcidev_info parses both a physical function (PF) and
+ # a virtual function (VF) from the fake node-device XML fixtures.
+
+ def fake_nodeDeviceLookupByName(name):
+ return FakeNodeDevice(_fake_NodeDevXml[name])
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
+ fake_nodeDeviceLookupByName
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actualvf = conn._get_pcidev_info("pci_0000_04_00_3")
+ expect_vf = {
+ "dev_id": "pci_0000_04_00_3",
+ "address": "0000:04:00.3",
+ "product_id": '1521',
+ "vendor_id": '8086',
+ "label": 'label_8086_1521',
+ "dev_type": 'type-PF',
+ }
+
+ self.assertEqual(actualvf, expect_vf)
+ # The VF additionally reports the parent PF's address.
+ actualvf = conn._get_pcidev_info("pci_0000_04_10_7")
+ expect_vf = {
+ "dev_id": "pci_0000_04_10_7",
+ "address": "0000:04:10.7",
+ "product_id": '1520',
+ "vendor_id": '8086',
+ "label": 'label_8086_1520',
+ "dev_type": 'type-VF',
+ "phys_function": '0000:04:00.3',
+ }
+
+ self.assertEqual(actualvf, expect_vf)
+
+ def test_pci_device_assignable(self):
+ # PFs are never assignable even when the whitelist filter allows
+ # the device; VFs and plain PCI devices are.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: True)
+
+ fake_dev = {'dev_type': 'type-PF'}
+ self.assertFalse(conn._pci_device_assignable(fake_dev))
+ fake_dev = {'dev_type': 'type-VF'}
+ self.assertTrue(conn._pci_device_assignable(fake_dev))
+ fake_dev = {'dev_type': 'type-PCI'}
+ self.assertTrue(conn._pci_device_assignable(fake_dev))
+
+ def test_list_devices_not_supported(self):
+ # VIR_ERR_NO_SUPPORT from listDevices yields an empty device list;
+ # any other libvirt error must propagate to the caller.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Handle just the NO_SUPPORT error
+ not_supported_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'this function is not supported by the connection driver:'
+ ' virNodeNumOfDevices',
+ error_code=libvirt.VIR_ERR_NO_SUPPORT)
+
+ with mock.patch.object(conn._conn, 'listDevices',
+ side_effect=not_supported_exc):
+ self.assertEqual('[]', conn._get_pci_passthrough_devices())
+
+ # We cache not supported status to avoid emitting too many logging
+ # messages. Clear this value to test the other exception case.
+ del conn._list_devices_supported
+
+ # Other errors should not be caught
+ other_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'other exc',
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+
+ with mock.patch.object(conn._conn, 'listDevices',
+ side_effect=other_exc):
+ self.assertRaises(libvirt.libvirtError,
+ conn._get_pci_passthrough_devices)
+
+ def test_get_pci_passthrough_devices(self):
+ # Verify the JSON list returned by _get_pci_passthrough_devices for
+ # a stubbed PF + VF pair. Only the VF entry (expectvfs[1]) is
+ # actually compared below.
+
+ def fakelistDevices(caps, fakeargs=0):
+ return ['pci_0000_04_00_3', 'pci_0000_04_10_7']
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
+
+ def fake_nodeDeviceLookupByName(name):
+ return FakeNodeDevice(_fake_NodeDevXml[name])
+
+ libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
+ fake_nodeDeviceLookupByName
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: x)
+ actjson = conn._get_pci_passthrough_devices()
+
+ # NOTE(review): expectvfs[0]'s address '0000:04:10.3' does not match
+ # its dev_id pci_0000_04_00_3, but that entry is never compared so
+ # the test still passes — worth confirming intent.
+ expectvfs = [
+ {
+ "dev_id": "pci_0000_04_00_3",
+ "address": "0000:04:10.3",
+ "product_id": '1521',
+ "vendor_id": '8086',
+ "dev_type": 'type-PF',
+ "phys_function": None},
+ {
+ "dev_id": "pci_0000_04_10_7",
+ "domain": 0,
+ "address": "0000:04:10.7",
+ "product_id": '1520',
+ "vendor_id": '8086',
+ "dev_type": 'type-VF',
+ "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
+ }
+ ]
+
+ # NOTE(review): 'actctualvfs' is a typo for 'actualvfs' (local name
+ # only, behavior unaffected).
+ actctualvfs = jsonutils.loads(actjson)
+ for key in actctualvfs[0].keys():
+ if key not in ['phys_function', 'virt_functions', 'label']:
+ self.assertEqual(actctualvfs[0][key], expectvfs[1][key])
+
+ def _fake_caps_numa_topology(self):
+ # Build a fake host NUMA topology: two cells, two CPUs each
+ # (cpu ids 0-3), with each cell reporting 1024 * Ki of memory.
+ topology = vconfig.LibvirtConfigCapsNUMATopology()
+
+ cell_0 = vconfig.LibvirtConfigCapsNUMACell()
+ cell_0.id = 0
+ cell_0.memory = 1024 * units.Ki
+ cpu_0_0 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_0_0.id = 0
+ cpu_0_0.socket_id = 0
+ cpu_0_0.core_id = 0
+ cpu_0_0.sibling = 0
+ cpu_0_1 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_0_1.id = 1
+ cpu_0_1.socket_id = 0
+ cpu_0_1.core_id = 1
+ cpu_0_1.sibling = 1
+ cell_0.cpus = [cpu_0_0, cpu_0_1]
+
+ cell_1 = vconfig.LibvirtConfigCapsNUMACell()
+ cell_1.id = 1
+ cell_1.memory = 1024 * units.Ki
+ cpu_1_0 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_1_0.id = 2
+ cpu_1_0.socket_id = 1
+ cpu_1_0.core_id = 0
+ cpu_1_0.sibling = 2
+ cpu_1_1 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_1_1.id = 3
+ cpu_1_1.socket_id = 1
+ cpu_1_1.core_id = 1
+ cpu_1_1.sibling = 3
+ cell_1.cpus = [cpu_1_0, cpu_1_1]
+
+ topology.cells = [cell_0, cell_1]
+ return topology
+
+ def test_get_host_numa_topology(self):
+ # With vcpu_pin_set restricted to {0, 1, 3}, cpu 2 must be dropped
+ # from cell 1 in the reported topology.
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ expected_topo_dict = {'cells': [
+ {'cpus': '0,1', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 0},
+ {'cpus': '3', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 1}]}
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, '_get_host_capabilities', return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3]))
+ ):
+ got_topo = conn._get_host_numa_topology()
+ got_topo_dict = got_topo._to_dict()
+ self.assertThat(
+ expected_topo_dict, matchers.DictMatches(got_topo_dict))
+
+ def test_get_host_numa_topology_empty(self):
+ # A host with no NUMA topology in its capabilities yields None.
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.topology = None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(conn, '_get_host_capabilities',
+ return_value=caps)
+ ) as (has_min_version, get_caps):
+ self.assertIsNone(conn._get_host_numa_topology())
+ get_caps.assert_called_once_with()
+
+ def test_get_host_numa_topology_not_supported(self):
+ # Tests that libvirt isn't new enough to support numa topology.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with mock.patch.object(conn, '_has_min_version', return_value=False):
+ self.assertIsNone(conn._get_host_numa_topology())
+
+ def test_diagnostic_vcpus_exception(self):
+ # vcpus() raising must not break diagnostics: the cpu_* keys (and
+ # cpu_details in the v3 API) are simply omitted while disk, nic and
+ # memory stats are still reported.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ raise libvirt.libvirtError('vcpus missing')
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ # Freeze "now" 10s after launch so uptime is deterministic.
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_blockstats_exception(self):
+ # blockStats() raising must not break diagnostics: disk keys (and
+ # disk_details in the v3 API) are omitted; cpu, nic and memory stats
+ # are still reported.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ raise libvirt.libvirtError('blockStats missing')
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ # Freeze "now" 10s after launch so uptime is deterministic.
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_interfacestats_exception(self):
+ # interfaceStats() raising must not break diagnostics: vnet0_* keys
+ # (and nic_details in the v3 API) are omitted; cpu, disk and memory
+ # stats are still reported.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ raise libvirt.libvirtError('interfaceStat missing')
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ }
+ self.assertEqual(actual, expect)
+
+ # Freeze "now" 10s after launch so uptime is deterministic.
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_memorystats_exception(self):
+ # memoryStats() raising must not break diagnostics: the
+ # memory-actual/memory-rss keys are omitted (maxMemory still gives
+ # 'memory'); cpu, disk and nic stats are still reported.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ raise libvirt.libvirtError('memoryStats missing')
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ # Freeze "now" 10s after launch so uptime is deterministic.
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_full(self):
+ # Happy path: every libvirt stats call succeeds, so the diagnostics
+ # contain cpu, disk, nic and memory sections all at once.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ # Freeze "now" 10s after launch so uptime is deterministic.
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_failing_vcpu_count(self, mock_list):
+ """Domain can fail to return the vcpu description in case it's
+ just starting up or shutting down. Make sure None is handled
+ gracefully.
+ """
+
+ class DiagFakeDomain(object):
+ def __init__(self, vcpus):
+ # vcpus=None simulates a domain in transition whose vcpus()
+ # call raises.
+ self._vcpus = vcpus
+
+ def vcpus(self):
+ if self._vcpus is None:
+ raise libvirt.libvirtError("fake-error")
+ else:
+ return ([1] * self._vcpus, [True] * self._vcpus)
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return "instance000001"
+
+ def UUIDString(self):
+ return "19479fee-07a5-49bb-9138-d3738280d63c"
+
+ mock_list.return_value = [
+ DiagFakeDomain(None), DiagFakeDomain(5)]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Only the healthy domain's 5 vcpus are counted.
+ self.assertEqual(5, drvr._get_vcpu_used())
+ mock_list.assert_called_with()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_failing_vcpu_count_none(self, mock_list):
+ """Domain will return zero if the current number of vcpus used
+ is None. This is in case of VM state starting up or shutting
+ down. None type returned is counted as zero.
+ """
+
+ class DiagFakeDomain(object):
+ def __init__(self):
+ pass
+
+ def vcpus(self):
+ # None (rather than an exception) from vcpus().
+ return None
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return "instance000001"
+
+ mock_list.return_value = [DiagFakeDomain()]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertEqual(0, drvr._get_vcpu_used())
+ mock_list.assert_called_with()
+
+ def test_get_memory_used_normal(self):
+ # Non-Xen path: used memory is derived from the faked /proc/meminfo
+ # (total minus free/buffers/cached), here 6866 MiB.
+ m = mock.mock_open(read_data="""
+MemTotal: 16194180 kB
+MemFree: 233092 kB
+MemAvailable: 8892356 kB
+Buffers: 567708 kB
+Cached: 8362404 kB
+SwapCached: 0 kB
+Active: 8381604 kB
+""")
+ with contextlib.nested(
+ mock.patch("__builtin__.open", m, create=True),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_conn"),
+ mock.patch('sys.platform', 'linux2'),
+ ) as (mock_file, mock_conn, mock_platform):
+ mock_conn.getInfo.return_value = [
+ arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.assertEqual(6866, drvr._get_memory_mb_used())
+
+ def test_get_memory_used_xen(self):
+ # Xen path: dom0 (id 0) memory usage comes from /proc/meminfo while
+ # each guest domain's memory is taken from dom.info(); all domains
+ # are listed via _list_instance_domains(only_guests=False).
+ self.flags(virt_type='xen', group='libvirt')
+
+ class DiagFakeDomain(object):
+ def __init__(self, id, memmb):
+ self.id = id
+ self.memmb = memmb
+
+ def info(self):
+ # info()[2] is the memory in KiB.
+ return [0, 0, self.memmb * 1024]
+
+ def ID(self):
+ return self.id
+
+ def name(self):
+ return "instance000001"
+
+ def UUIDString(self):
+ return str(uuid.uuid4())
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ m = mock.mock_open(read_data="""
+MemTotal: 16194180 kB
+MemFree: 233092 kB
+MemAvailable: 8892356 kB
+Buffers: 567708 kB
+Cached: 8362404 kB
+SwapCached: 0 kB
+Active: 8381604 kB
+""")
+
+ with contextlib.nested(
+ mock.patch("__builtin__.open", m, create=True),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains"),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_conn"),
+ mock.patch('sys.platform', 'linux2'),
+ ) as (mock_file, mock_list, mock_conn, mock_platform):
+ mock_list.return_value = [
+ DiagFakeDomain(0, 15814),
+ DiagFakeDomain(1, 750),
+ DiagFakeDomain(2, 1042)]
+ mock_conn.getInfo.return_value = [
+ arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]
+
+ self.assertEqual(8657, drvr._get_memory_mb_used())
+ mock_list.assert_called_with(only_guests=False)
+
+ def test_get_instance_capabilities(self):
+ # Each guest's (arch, domtype, ostype) combination is flattened into
+ # one capability tuple per domain type.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ def get_host_capabilities_stub(self):
+ caps = vconfig.LibvirtConfigCaps()
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = arch.X86_64
+ guest.domtype = ['kvm', 'qemu']
+ caps.guests.append(guest)
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = arch.I686
+ guest.domtype = ['kvm']
+ caps.guests.append(guest)
+
+ return caps
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ get_host_capabilities_stub)
+
+ want = [(arch.X86_64, 'kvm', 'hvm'),
+ (arch.X86_64, 'qemu', 'hvm'),
+ (arch.I686, 'kvm', 'hvm')]
+ got = conn._get_instance_capabilities()
+ self.assertEqual(want, got)
+
+ def test_event_dispatch(self):
+ # Validate that the libvirt self-pipe for forwarding
+ # events between threads is working sanely
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+
+ conn._init_events_pipe()
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+ conn._queue_event(event1)
+ conn._queue_event(event2)
+ conn._dispatch_events()
+
+ # Queued events reach the listener in order.
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ conn._queue_event(event3)
+ conn._queue_event(event4)
+ conn._dispatch_events()
+
+ # A second dispatch round appends without losing earlier events.
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
+ def test_event_lifecycle(self):
+ # Validate that libvirt events are correctly translated
+ # to Nova events
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+ conn._init_events_pipe()
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ dom = FakeVirtDomain(fake_dom_xml,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+
+ # Simulate libvirt firing VIR_DOMAIN_EVENT_STOPPED for the domain.
+ conn._event_lifecycle_callback(conn._conn,
+ dom,
+ libvirt.VIR_DOMAIN_EVENT_STOPPED,
+ 0,
+ conn)
+ conn._dispatch_events()
+ self.assertEqual(len(got_events), 1)
+ self.assertIsInstance(got_events[0], virtevent.LifecycleEvent)
+ self.assertEqual(got_events[0].uuid,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+ self.assertEqual(got_events[0].transition,
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'emit_event')
+ def test_event_emit_delayed_call_now(self, emit_event_mock):
+ # On kvm, events are emitted immediately with no delay.
+ self.flags(virt_type="kvm", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._event_emit_delayed(None)
+ emit_event_mock.assert_called_once_with(None)
+
+ @mock.patch.object(greenthread, 'spawn_after')
+ def test_event_emit_delayed_call_delayed(self, spawn_after_mock):
+ # On xen, a STOPPED lifecycle event is deferred via
+ # greenthread.spawn_after with a 15-second delay.
+ CONF.set_override("virt_type", "xen", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ event = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+ conn._event_emit_delayed(event)
+ spawn_after_mock.assert_called_once_with(15, conn.emit_event, event)
+
+ @mock.patch.object(greenthread, 'spawn_after')
+ def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
+ # If a delayed event is already pending for the uuid, no second
+ # greenthread is spawned.
+ self.flags(virt_type="xen", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
+ conn._events_delayed[uuid] = None
+ event = virtevent.LifecycleEvent(
+ uuid, virtevent.EVENT_LIFECYCLE_STOPPED)
+ conn._event_emit_delayed(event)
+ self.assertFalse(spawn_after_mock.called)
+
+ def test_event_delayed_cleanup(self):
+ # A STARTED event cancels any pending delayed greenthread for the
+ # same uuid and removes the entry from _events_delayed.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
+ event = virtevent.LifecycleEvent(
+ uuid, virtevent.EVENT_LIFECYCLE_STARTED)
+ gt_mock = mock.Mock()
+ conn._events_delayed[uuid] = gt_mock
+ conn._event_delayed_cleanup(event)
+ gt_mock.cancel.assert_called_once_with()
+ self.assertNotIn(uuid, conn._events_delayed.keys())
+
+ def test_set_cache_mode(self):
+ # A configured 'file=directsync' cachemode is applied to a disk
+ # config whose source_type matches.
+ self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn._set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'directsync')
+
+ def test_set_cache_mode_invalid_mode(self):
+ # An unknown cachemode value is ignored, leaving driver_cache unset.
+ self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn._set_cache_mode(fake_conf)
+ self.assertIsNone(fake_conf.driver_cache)
+
+ def test_set_cache_mode_invalid_object(self):
+ # A non-disk config object is left untouched by _set_cache_mode.
+ self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuest()
+
+ fake_conf.driver_cache = 'fake'
+ conn._set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'fake')
+
+ def _test_shared_storage_detection(self, is_same):
+ # Helper: a touch-over-ssh probe file is checked locally; if the
+ # file appears locally (is_same) storage is shared and the file is
+ # removed locally, otherwise it is removed over ssh.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(os, 'unlink')
+ conn.get_host_ip_addr().AndReturn('bar')
+ utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
+ os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
+ if is_same:
+ os.unlink(mox.IgnoreArg())
+ else:
+ utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
+ self.mox.ReplayAll()
+ return conn._is_storage_shared_with('foo', '/path')
+
+ def test_shared_storage_detection_same_host(self):
+ """Storage is shared when the remotely-touched file appears locally."""
+ self.assertTrue(self._test_shared_storage_detection(True))
+
+ def test_shared_storage_detection_different_host(self):
+ """Storage is not shared when the probe file never shows up locally."""
+ self.assertFalse(self._test_shared_storage_detection(False))
+
+ def test_shared_storage_detection_easy(self):
+ """If the remote host IP equals our own, storage is trivially shared
+ (no ssh probe is recorded, so any probe would fail verification)."""
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(os, 'unlink')
+ conn.get_host_ip_addr().AndReturn('foo')
+ self.mox.ReplayAll()
+ self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_domain_info_with_more_return(self, lookup_mock):
+ """get_info() copes with dom.info() returning more than the five
+ expected values, consuming only the first five plus the domain id."""
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ dom_mock = mock.MagicMock()
+ # six values: the trailing 888888 must be ignored by the driver
+ dom_mock.info.return_value = [
+ 1, 2048, 737, 8, 12345, 888888
+ ]
+ dom_mock.ID.return_value = mock.sentinel.instance_id
+ lookup_mock.return_value = dom_mock
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_info(instance)
+ expect = {'state': 1,
+ 'max_mem': 2048,
+ 'mem': 737,
+ 'num_cpu': 8,
+ 'cpu_time': 12345,
+ 'id': mock.sentinel.instance_id}
+ self.assertEqual(expect, info)
+ dom_mock.info.assert_called_once_with()
+ dom_mock.ID.assert_called_once_with()
+ lookup_mock.assert_called_once_with(instance['name'])
+
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ @mock.patch.object(encodeutils, 'safe_decode')
+ def test_create_domain(self, mock_safe_decode, mock_get_inst_path):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+
+ domain = conn._create_domain(domain=mock_domain,
+ instance=mock_instance)
+
+ self.assertEqual(mock_domain, domain)
+ mock_get_inst_path.assertHasCalls([mock.call(mock_instance)])
+ mock_domain.createWithFlags.assertHasCalls([mock.call(0)])
+ self.assertEqual(2, mock_safe_decode.call_count)
+
+ @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
+ mock_setup_container, mock_get_info, mock_clean):
+ """LXC: _create_domain_and_network() sets up the container rootfs,
+ records the nbd device in system_metadata and, once the instance
+ reports RUNNING, cleans up the LXC namespace."""
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.return_value = {'state': power_state.RUNNING}
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
+ conn._create_domain_and_network(self.context, 'xml',
+ mock_instance, [])
+
+ # the container device must be remembered for later teardown
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_not_called()
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
+ @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
+ mock_ensure_tree, mock_setup_container,
+ mock_chown, mock_get_info, mock_clean):
+ """LXC with uid/gid maps: rootfs ownership is shifted through
+ chown_for_id_maps() with the configured UID/GID map objects."""
+ self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
+ gid_maps=["0:1000:100"], group='libvirt')
+
+ def chown_side_effect(path, id_maps):
+ # validate the parsed "start:target:count" map objects in-line
+ self.assertEqual('/tmp/rootfs', path)
+ self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
+ self.assertEqual(0, id_maps[0].start)
+ self.assertEqual(1000, id_maps[0].target)
+ self.assertEqual(100, id_maps[0].count)
+ self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
+ self.assertEqual(0, id_maps[1].start)
+ self.assertEqual(1000, id_maps[1].target)
+ self.assertEqual(100, id_maps[1].count)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_chown.side_effect = chown_side_effect
+ mock_get_info.return_value = {'state': power_state.RUNNING}
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
+ conn._create_domain_and_network(self.context, 'xml',
+ mock_instance, [])
+
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_not_called()
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
+ @mock.patch('nova.virt.disk.api.teardown_container')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc_not_running(self, mock_get_inst_path,
+ mock_ensure_tree,
+ mock_setup_container,
+ mock_get_info, mock_teardown):
+ """LXC: if the instance is not RUNNING after creation, the container
+ is torn down rather than having its namespace cleaned."""
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ # SHUTDOWN state triggers the teardown path
+ mock_get_info.return_value = {'state': power_state.SHUTDOWN}
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
+ conn._create_domain_and_network(self.context, 'xml',
+ mock_instance, [])
+
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_not_called()
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ teardown_call = mock.call(container_dir='/tmp/rootfs')
+ mock_teardown.assert_has_calls([teardown_call])
+
+ def test_create_domain_define_xml_fails(self):
+ """Tests that the xml is logged when defining the domain fails."""
+ fake_xml = "<test>this is a test</test>"
+
+ def fake_defineXML(xml):
+ # confirm the driver passed our xml through, then fail
+ self.assertEqual(fake_xml, xml)
+ raise libvirt.libvirtError('virDomainDefineXML() failed')
+
+ self.log_error_called = False
+
+ def fake_error(msg, *args):
+ # the error log line must include the offending XML
+ self.log_error_called = True
+ self.assertIn(fake_xml, msg % args)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock(defineXML=fake_defineXML)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertRaises(libvirt.libvirtError, conn._create_domain, fake_xml)
+ self.assertTrue(self.log_error_called)
+
+ def test_create_domain_with_flags_fails(self):
+ """Tests that the xml is logged when creating the domain with flags
+ fails
+ """
+ fake_xml = "<test>this is a test</test>"
+ fake_domain = FakeVirtDomain(fake_xml)
+
+ def fake_createWithFlags(launch_flags):
+ raise libvirt.libvirtError('virDomainCreateWithFlags() failed')
+
+ self.log_error_called = False
+
+ def fake_error(msg, *args):
+ # the error log line must include the domain's XML
+ self.log_error_called = True
+ self.assertIn(fake_xml, msg % args)
+
+ self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock()
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertRaises(libvirt.libvirtError, conn._create_domain,
+ domain=fake_domain)
+ self.assertTrue(self.log_error_called)
+
+ def test_create_domain_enable_hairpin_fails(self):
+ """Tests that the xml is logged when enabling hairpin mode for the
+ domain fails.
+ """
+ fake_xml = "<test>this is a test</test>"
+ fake_domain = FakeVirtDomain(fake_xml)
+
+ def fake_enable_hairpin(launch_flags):
+ raise processutils.ProcessExecutionError('error')
+
+ self.log_error_called = False
+
+ def fake_error(msg, *args):
+ self.log_error_called = True
+ self.assertIn(fake_xml, msg % args)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock()
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(conn, '_enable_hairpin', fake_enable_hairpin)
+
+ # power_on=False: hairpin is still attempted and its failure surfaces
+ self.assertRaises(processutils.ProcessExecutionError,
+ conn._create_domain,
+ domain=fake_domain,
+ power_on=False)
+ self.assertTrue(self.log_error_called)
+
+ def test_get_vnc_console(self):
+ """get_vnc_console() extracts the vnc graphics port from the
+ domain XML."""
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<graphics type='vnc' port='5900'/>"
+ "</devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ vnc_dict = conn.get_vnc_console(self.context, instance)
+ self.assertEqual(vnc_dict.port, '5900')
+
+ def test_get_vnc_console_unavailable(self):
+ """A domain with no vnc graphics device raises
+ ConsoleTypeUnavailable."""
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices></devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ conn.get_vnc_console, self.context, instance)
+
+ def test_get_spice_console(self):
+ """get_spice_console() extracts the spice graphics port from the
+ domain XML."""
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<graphics type='spice' port='5950'/>"
+ "</devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ spice_dict = conn.get_spice_console(self.context, instance)
+ self.assertEqual(spice_dict.port, '5950')
+
+ def test_get_spice_console_unavailable(self):
+ """A domain with no spice graphics device raises
+ ConsoleTypeUnavailable."""
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices></devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ conn.get_spice_console, self.context, instance)
+
+ def test_detach_volume_with_instance_not_found(self):
+ # Test that detach_volume() method does not raise exception,
+ # if the instance does not exist.
+
+ instance = objects.Instance(**self.test_instance)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_lookup_by_name',
+ side_effect=exception.InstanceNotFound(
+ instance_id=instance.name)),
+ mock.patch.object(conn, '_disconnect_volume')
+ ) as (_lookup_by_name, _disconnect_volume):
+ connection_info = {'driver_volume_type': 'fake'}
+ conn.detach_volume(connection_info, instance, '/dev/sda')
+ _lookup_by_name.assert_called_once_with(instance.name)
+ # the volume is still disconnected even though the guest is gone
+ _disconnect_volume.assert_called_once_with(connection_info,
+ 'sda')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_attach_detach_interface_get_config(self, method_name,
+ mock_flavor):
+ """Tests that the get_config() method is properly called in
+ attach_interface() and detach_interface().
+
+ method_name: either \"attach_interface\" or \"detach_interface\"
+ depending on the method to test.
+ """
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+
+ instance = objects.Instance(**self.test_instance)
+ mock_flavor.return_value = instance.get_flavor()
+ network_info = _fake_network_info(self.stubs, 1)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ if method_name == "attach_interface":
+ fake_image_meta = {'id': instance['image_ref']}
+ elif method_name == "detach_interface":
+ fake_image_meta = None
+ else:
+ raise ValueError("Unhandled method %" % method_name)
+
+ if method_name == "attach_interface":
+ self.mox.StubOutWithMock(conn.firewall_driver,
+ 'setup_basic_filtering')
+ conn.firewall_driver.setup_basic_filtering(instance, network_info)
+
+ expected = conn.vif_driver.get_config(instance, network_info[0],
+ fake_image_meta,
+ instance.get_flavor(),
+ CONF.libvirt.virt_type)
+ self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
+ conn.vif_driver.get_config(instance, network_info[0],
+ fake_image_meta,
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).\
+ AndReturn(expected)
+
+ self.mox.ReplayAll()
+
+ if method_name == "attach_interface":
+ conn.attach_interface(instance, fake_image_meta,
+ network_info[0])
+ elif method_name == "detach_interface":
+ conn.detach_interface(instance, network_info[0])
+ else:
+ raise ValueError("Unhandled method %" % method_name)
+
+ @mock.patch.object(lockutils, "external_lock")
+ def test_attach_interface_get_config(self, mock_lock):
+ """Tests that the get_config() method is properly called in
+ attach_interface().
+ """
+ # use a real semaphore so the lock decorator does not hit the fs
+ mock_lock.return_value = threading.Semaphore()
+
+ self._test_attach_detach_interface_get_config("attach_interface")
+
+ def test_detach_interface_get_config(self):
+ """Tests that the get_config() method is properly called in
+ detach_interface().
+ """
+ self._test_attach_detach_interface_get_config("detach_interface")
+
+ def test_default_root_device_name(self):
+ instance = {'uuid': 'fake_instance'}
+ image_meta = {'id': 'fake'}
+ root_bdm = {'source_type': 'image',
+ 'detination_type': 'volume',
+ 'image_id': 'fake_id'}
+ self.flags(virt_type='fake_libvirt_type', group='libvirt')
+
+ self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
+ self.mox.StubOutWithMock(blockinfo, 'get_root_info')
+
+ blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
+ image_meta,
+ 'disk').InAnyOrder().\
+ AndReturn('virtio')
+ blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
+ image_meta,
+ 'cdrom').InAnyOrder().\
+ AndReturn('ide')
+ blockinfo.get_root_info('fake_libvirt_type',
+ image_meta, root_bdm,
+ 'virtio', 'ide').AndReturn({'dev': 'vda'})
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertEqual(conn.default_root_device_name(instance, image_meta,
+ root_bdm), '/dev/vda')
+
+ def test_default_device_names_for_instance(self):
+ """default_device_names_for_instance() delegates straight to
+ blockinfo.default_device_names() with the configured virt_type."""
+ instance = {'uuid': 'fake_instance'}
+ root_device_name = '/dev/vda'
+ ephemerals = [{'device_name': 'vdb'}]
+ swap = [{'device_name': 'vdc'}]
+ block_device_mapping = [{'device_name': 'vdc'}]
+ self.flags(virt_type='fake_libvirt_type', group='libvirt')
+
+ self.mox.StubOutWithMock(blockinfo, 'default_device_names')
+
+ blockinfo.default_device_names('fake_libvirt_type', mox.IgnoreArg(),
+ instance, root_device_name,
+ ephemerals, swap, block_device_mapping)
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.default_device_names_for_instance(instance, root_device_name,
+ ephemerals, swap,
+ block_device_mapping)
+
+ def test_is_supported_fs_format(self):
+ """ext2/3/4 and xfs are supported filesystem formats; empty or
+ unknown names are not."""
+ supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
+ disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertTrue(conn.is_supported_fs_format(fs))
+
+ supported_fs = ['', 'dummy']
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertFalse(conn.is_supported_fs_format(fs))
+
+ def test_hypervisor_hostname_caching(self):
+ # Make sure that the first hostname is always returned
+ class FakeConn(object):
+ def getHostname(self):
+ pass
+
+ def getLibVersion(self):
+ return 99999
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn._wrapped_conn = FakeConn()
+ self.mox.StubOutWithMock(conn._wrapped_conn, 'getHostname')
+ # record two different answers: caching means 'bar' is never seen
+ conn._conn.getHostname().AndReturn('foo')
+ conn._conn.getHostname().AndReturn('bar')
+ self.mox.ReplayAll()
+ self.assertEqual('foo', conn._get_hypervisor_hostname())
+ self.assertEqual('foo', conn._get_hypervisor_hostname())
+
+ def test_get_connection_serial(self):
+ """Two serial uses of the lazily-built connection trigger exactly
+ one _connect and one event-handler registration."""
+
+ def get_conn_currency(driver):
+ driver._conn.getLibVersion()
+
+ def connect_with_block(*a, **k):
+ # enough to allow another connect to run
+ eventlet.sleep(0)
+ self.connect_calls += 1
+ return self.conn
+
+ def fake_register(*a, **k):
+ self.register_calls += 1
+
+ self.connect_calls = 0
+ self.register_calls = 0
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_connect', connect_with_block)
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
+
+ # call serially
+ get_conn_currency(driver)
+ get_conn_currency(driver)
+ self.assertEqual(self.connect_calls, 1)
+ self.assertEqual(self.register_calls, 1)
+
+ def test_get_connection_concurrency(self):
+ """Two concurrent greenthreads racing for the connection still
+ trigger exactly one _connect and one handler registration."""
+
+ def get_conn_currency(driver):
+ driver._conn.getLibVersion()
+
+ def connect_with_block(*a, **k):
+ # enough to allow another connect to run
+ eventlet.sleep(0)
+ self.connect_calls += 1
+ return self.conn
+
+ def fake_register(*a, **k):
+ self.register_calls += 1
+
+ self.connect_calls = 0
+ self.register_calls = 0
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_connect', connect_with_block)
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
+
+ # call concurrently
+ thr1 = eventlet.spawn(get_conn_currency, driver=driver)
+ thr2 = eventlet.spawn(get_conn_currency, driver=driver)
+
+ # let threads run
+ eventlet.sleep(0)
+
+ thr1.wait()
+ thr2.wait()
+ self.assertEqual(self.connect_calls, 1)
+ self.assertEqual(self.register_calls, 1)
+
+ def test_post_live_migration_at_destination_with_block_device_info(self):
+ """post_live_migration_at_destination() regenerates the guest XML
+ (including the attached volume) and saves the volume BDM."""
+ # Preparing mocks
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ self.resultXML = None
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_getLibVersion():
+ return 9011
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None, write_to_disk=False):
+ # build a real guest config so the produced XML can be inspected
+ if image_meta is None:
+ image_meta = {}
+ conf = conn._get_guest_config(instance, network_info, image_meta,
+ disk_info, rescue, block_device_info)
+ self.resultXML = conf.to_xml()
+ return self.resultXML
+
+ def fake_lookup_name(instance_name):
+ return mock_domain
+
+ def fake_defineXML(xml):
+ return
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Westmere</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='aes'/>
+ </cpu>
+ """
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
+ getCapabilities=fake_getCapabilities,
+ getVersion=lambda: 1005001)
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
+ instance = objects.Instance(**instance_ref)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
+ libvirt_driver.LibvirtDriver._conn.getCapabilities = \
+ fake_getCapabilities
+ libvirt_driver.LibvirtDriver._conn.getVersion = lambda: 1005001
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ libvirt_driver.LibvirtDriver._conn.defineXML = fake_defineXML
+ libvirt_driver.LibvirtDriver._conn.baselineCPU = fake_baselineCPU
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn,
+ '_get_guest_xml',
+ fake_to_xml)
+ self.stubs.Set(conn,
+ '_lookup_by_name',
+ fake_lookup_name)
+ # a single boot volume attached as /dev/vda over iscsi
+ block_device_info = {'block_device_mapping':
+ driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'guest_format': None,
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'delete_on_termination': False}),
+ ])}
+ block_device_info['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'iscsi'})
+ with contextlib.nested(
+ mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'),
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=flavor),
+ mock.patch.object(objects.Instance, 'save')):
+ conn.post_live_migration_at_destination(
+ self.context, instance, network_info, True,
+ block_device_info=block_device_info)
+ self.assertTrue('fake' in self.resultXML)
+ self.assertTrue(
+ block_device_info['block_device_mapping'][0].save.called)
+
+ def test_create_propagates_exceptions(self):
+ """An exception from _create_domain is not swallowed by
+ _create_domain_and_network (lxc path)."""
+ self.flags(virt_type='lxc', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(id=1, uuid='fake-uuid',
+ image_ref='my_fake_image')
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_domain_setup_lxc'),
+ mock.patch.object(conn, '_create_domain_cleanup_lxc'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, '_create_domain',
+ side_effect=exception.NovaException),
+ mock.patch.object(conn, 'cleanup')):
+ self.assertRaises(exception.NovaException,
+ conn._create_domain_and_network,
+ self.context,
+ 'xml',
+ instance, None)
+
+ def test_create_without_pause(self):
+ self.flags(virt_type='lxc', group='libvirt')
+
+ @contextlib.contextmanager
+ def fake_lxc_disk_handler(*args, **kwargs):
+ yield
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(id=1, uuid='fake-uuid')
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_lxc_disk_handler',
+ side_effect=fake_lxc_disk_handler),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'cleanup')) as (
+ _handler, cleanup, firewall_driver, create, plug_vifs):
+ domain = conn._create_domain_and_network(self.context, 'xml',
+ instance, None)
+ self.assertEqual(0, create.call_args_list[0][1]['launch_flags'])
+ self.assertEqual(0, domain.resume.call_count)
+
+ def _test_create_with_network_events(self, neutron_failure=None,
+ power_on=True):
+ """Helper: drive _create_domain_and_network with two inactive vifs
+ and a fake event system; neutron_failure selects how (or whether)
+ the network-vif-plugged wait fails ('timeout', 'error' or None)."""
+ generated_events = []
+
+ def wait_timeout():
+ event = mock.MagicMock()
+ if neutron_failure == 'timeout':
+ raise eventlet.timeout.Timeout()
+ elif neutron_failure == 'error':
+ event.status = 'failed'
+ else:
+ event.status = 'completed'
+ return event
+
+ def fake_prepare(instance, event_name):
+ # record every prepared event so expectations can be checked
+ m = mock.MagicMock()
+ m.instance = instance
+ m.event_name = event_name
+ m.wait.side_effect = wait_timeout
+ generated_events.append(m)
+ return m
+
+ virtapi = manager.ComputeVirtAPI(mock.MagicMock())
+ prepare = virtapi._compute.instance_events.prepare_for_instance_event
+ prepare.side_effect = fake_prepare
+ conn = libvirt_driver.LibvirtDriver(virtapi, False)
+
+ instance = objects.Instance(id=1, uuid='fake-uuid')
+ vifs = [{'id': 'vif1', 'active': False},
+ {'id': 'vif2', 'active': False}]
+
+ @mock.patch.object(conn, 'plug_vifs')
+ @mock.patch.object(conn, 'firewall_driver')
+ @mock.patch.object(conn, '_create_domain')
+ @mock.patch.object(conn, 'cleanup')
+ def test_create(cleanup, create, fw_driver, plug_vifs):
+ domain = conn._create_domain_and_network(self.context, 'xml',
+ instance, vifs,
+ power_on=power_on)
+ plug_vifs.assert_called_with(instance, vifs)
+
+ flag = self._get_launch_flags(conn, vifs, power_on=power_on)
+ self.assertEqual(flag,
+ create.call_args_list[0][1]['launch_flags'])
+ if flag:
+ domain.resume.assert_called_once_with()
+ if neutron_failure and CONF.vif_plugging_is_fatal:
+ cleanup.assert_called_once_with(self.context,
+ instance, network_info=vifs,
+ block_device_info=None)
+
+ test_create()
+
+ # waiting only happens with neutron + a timeout + power_on
+ if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
+ prepare.assert_has_calls([
+ mock.call(instance, 'network-vif-plugged-vif1'),
+ mock.call(instance, 'network-vif-plugged-vif2')])
+ for event in generated_events:
+ if neutron_failure and generated_events.index(event) != 0:
+ # NOTE(review): this checks the event mock itself was
+ # never called -- possibly event.wait.call_count was
+ # intended; confirm before relying on this assertion.
+ self.assertEqual(0, event.call_count)
+ elif (neutron_failure == 'error' and
+ not CONF.vif_plugging_is_fatal):
+ event.wait.assert_called_once_with()
+ else:
+ self.assertEqual(0, prepare.call_count)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron(self, is_neutron):
+ """Neutron, no failure: events are prepared and waited on."""
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_power_off(self,
+ is_neutron):
+ # Tests that we don't wait for events if we don't start the instance.
+ self._test_create_with_network_events(power_on=False)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_nowait(self, is_neutron):
+ """vif_plugging_timeout=0 disables waiting for plug events."""
+ self.flags(vif_plugging_timeout=0)
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_nonfatal_timeout(
+ self, is_neutron):
+ """A plug-event timeout is tolerated when plugging is non-fatal."""
+ self.flags(vif_plugging_is_fatal=False)
+ self._test_create_with_network_events(neutron_failure='timeout')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_fatal_timeout(
+ self, is_neutron):
+ """A plug-event timeout raises VirtualInterfaceCreateException when
+ plugging is fatal (the default)."""
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._test_create_with_network_events,
+ neutron_failure='timeout')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_nonfatal_error(
+ self, is_neutron):
+ """A 'failed' plug event is tolerated when plugging is non-fatal."""
+ self.flags(vif_plugging_is_fatal=False)
+ self._test_create_with_network_events(neutron_failure='error')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_fatal_error(
+ self, is_neutron):
+ """A 'failed' plug event raises VirtualInterfaceCreateException when
+ plugging is fatal (the default)."""
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._test_create_with_network_events,
+ neutron_failure='error')
+
+ @mock.patch('nova.utils.is_neutron', return_value=False)
+ def test_create_with_network_events_non_neutron(self, is_neutron):
+ """Without neutron no events are prepared at all."""
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.volume.encryptors.get_encryption_metadata')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
+ """_create_domain_and_network() with a volume BDM fetches the
+ volume's encryption metadata, builds an encryptor, plugs vifs,
+ applies firewall filters and launches the domain."""
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ mock_dom = mock.MagicMock()
+ mock_encryption_meta = mock.MagicMock()
+ get_encryption_metadata.return_value = mock_encryption_meta
+
+ fake_xml = """
+ <domain>
+ <name>instance-00000001</name>
+ <memory>1048576</memory>
+ <vcpu>1</vcpu>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw' cache='none'/>
+ <source file='/path/fake-volume1'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ fake_volume_id = "fake-volume-id"
+ connection_info = {"driver_volume_type": "fake",
+ "data": {"access_mode": "rw",
+ "volume_id": fake_volume_id}}
+
+ def fake_getitem(*args, **kwargs):
+ fake_bdm = {'connection_info': connection_info,
+ 'mount_device': '/dev/vda'}
+ return fake_bdm.get(args[0])
+
+ mock_volume = mock.MagicMock()
+ mock_volume.__getitem__.side_effect = fake_getitem
+ bdi = {'block_device_mapping': [mock_volume]}
+ network_info = [network_model.VIF(id='1'),
+ network_model.VIF(id='2', active=True)]
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_get_volume_encryptor'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver,
+ 'prepare_instance_filter'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
+ ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
+ prepare_instance_filter, create_domain, apply_instance_filter):
+ create_domain.return_value = mock_dom
+
+ domain = conn._create_domain_and_network(self.context, fake_xml,
+ instance, network_info,
+ block_device_info=bdi)
+
+ get_encryption_metadata.assert_called_once_with(self.context,
+ conn._volume_api, fake_volume_id, connection_info)
+ get_volume_encryptor.assert_called_once_with(connection_info,
+ mock_encryption_meta)
+ plug_vifs.assert_called_once_with(instance, network_info)
+ setup_basic_filtering.assert_called_once_with(instance,
+ network_info)
+ prepare_instance_filter.assert_called_once_with(instance,
+ network_info)
+ flags = self._get_launch_flags(conn, network_info)
+ create_domain.assert_called_once_with(fake_xml, instance=instance,
+ launch_flags=flags,
+ power_on=True)
+ self.assertEqual(mock_dom, domain)
+
+ # _get_guest_storage_config should emit root + ephemeral + the mapped
+ # volume (3 devices), connect the volume at /dev/vdc, persist the BDM
+ # once, and apply the cache mode to every device.
+ def test_get_guest_storage_config(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["default_swap_device"] = None
+ instance = objects.Instance(**test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ conn_info = {'driver_volume_type': 'fake', 'data': {}}
+ bdi = {'block_device_mapping':
+ driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vdc'})
+ ])}
+ bdm = bdi['block_device_mapping'][0]
+ bdm['connection_info'] = conn_info
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance, bdi)
+ mock_conf = mock.MagicMock(source_path='fake')
+
+ with contextlib.nested(
+ mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
+ 'save'),
+ mock.patch.object(conn, '_connect_volume'),
+ mock.patch.object(conn, '_get_volume_config',
+ return_value=mock_conf),
+ mock.patch.object(conn, '_set_cache_mode')
+ ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
+ devices = conn._get_guest_storage_config(instance, None,
+ disk_info, False, bdi, flavor)
+
+ self.assertEqual(3, len(devices))
+ self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
+ self.assertIsNone(instance.default_swap_device)
+ connect_volume.assert_called_with(bdm['connection_info'],
+ {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
+ get_volume_config.assert_called_with(bdm['connection_info'],
+ {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
+ self.assertEqual(1, volume_save.call_count)
+ self.assertEqual(3, set_cache_mode.call_count)
+
+ # Only VIFs that are not yet active produce a network-vif-plugged
+ # event to wait on; the already-active VIF '2' is excluded.
+ def test_get_neutron_events(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = [network_model.VIF(id='1'),
+ network_model.VIF(id='2', active=True)]
+ events = conn._get_neutron_events(network_info)
+ self.assertEqual([('network-vif-plugged', '1')], events)
+
+ # With ignore_errors=True the private _unplug_vifs swallows the
+ # vif driver's AgentError but still attempts the unplug.
+ def test_unplug_vifs_ignores_errors(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ with mock.patch.object(conn, 'vif_driver') as vif_driver:
+ vif_driver.unplug.side_effect = exception.AgentError(
+ method='unplug')
+ conn._unplug_vifs('inst', [1], ignore_errors=True)
+ vif_driver.unplug.assert_called_once_with('inst', 1)
+
+ # The public unplug_vifs, by contrast, propagates the AgentError.
+ def test_unplug_vifs_reports_errors(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ with mock.patch.object(conn, 'vif_driver') as vif_driver:
+ vif_driver.unplug.side_effect = exception.AgentError(
+ method='unplug')
+ self.assertRaises(exception.AgentError,
+ conn.unplug_vifs, 'inst', [1])
+ vif_driver.unplug.assert_called_once_with('inst', 1)
+
+ # cleanup() must still disconnect a volume whose BDM has no
+ # mount_device (None) rather than skipping it.
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ conn.firewall_driver = mock.Mock()
+ conn._disconnect_volume = mock.Mock()
+ fake_inst = {'name': 'foo'}
+ fake_bdms = [{'connection_info': 'foo',
+ 'mount_device': None}]
+ with mock.patch('nova.virt.driver'
+ '.block_device_info_get_mapping',
+ return_value=fake_bdms):
+ conn.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
+ self.assertTrue(conn._disconnect_volume.called)
+
+ # cleanup() should call _unplug_vifs with ignore_errors=True (the
+ # trailing positional True) so VIF teardown failures don't abort it.
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ fake_inst = {'name': 'foo'}
+ with mock.patch.object(conn._conn, 'lookupByName') as lookup:
+ lookup.return_value = fake_inst
+ # NOTE(danms): Make unplug cause us to bail early, since
+ # we only care about how it was called
+ unplug.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ conn.cleanup, 'ctxt', fake_inst, 'netinfo')
+ unplug.assert_called_once_with(fake_inst, 'netinfo', True)
+
+ # With serial_console enabled, cleanup() must release the serial ports
+ # BEFORE undefining the domain (once undefined, the ports can no longer
+ # be looked up); the side_effect trick enforces that ordering.
+ @mock.patch('nova.virt.driver.block_device_info_get_mapping')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_get_serial_ports_from_instance')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_serial_console_enabled(
+ self, undefine, get_ports,
+ block_device_info_get_mapping):
+ self.flags(enabled="True", group='serial_console')
+ instance = 'i1'
+ network_info = {}
+ bdm_info = {}
+ firewall_driver = mock.MagicMock()
+
+ get_ports.return_value = iter([('127.0.0.1', 10000)])
+ block_device_info_get_mapping.return_value = ()
+
+ # We want to ensure undefine_domain is called after
+ # lookup_domain.
+ def undefine_domain(instance):
+ get_ports.side_effect = Exception("domain undefined")
+ undefine.side_effect = undefine_domain
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ conn.firewall_driver = firewall_driver
+ conn.cleanup(
+ 'ctx', instance, network_info,
+ block_device_info=bdm_info,
+ destroy_disks=False, destroy_vifs=False)
+
+ get_ports.assert_called_once_with(instance)
+ undefine.assert_called_once_with(instance)
+ firewall_driver.unfilter_instance.assert_called_once_with(
+ instance, network_info=network_info)
+ block_device_info_get_mapping.assert_called_once_with(bdm_info)
+
+ # _swap_volume for a persistent domain: fetch the inactive+secure XML,
+ # blockRebase with COPY|REUSE_EXT onto the new path, resize the block
+ # device to 1 GiB (expressed in KiB), and re-define the domain XML.
+ def test_swap_volume(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ mock_dom = mock.MagicMock()
+
+ with mock.patch.object(drvr._conn, 'defineXML',
+ create=True) as mock_define:
+ xmldoc = "<domain/>"
+ srcfile = "/first/path"
+ dstfile = "/second/path"
+
+ mock_dom.XMLDesc.return_value = xmldoc
+ mock_dom.isPersistent.return_value = True
+ # Empty job info means the rebase job is already finished.
+ mock_dom.blockJobInfo.return_value = {}
+
+ drvr._swap_volume(mock_dom, srcfile, dstfile, 1)
+
+ mock_dom.XMLDesc.assert_called_once_with(
+ fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
+ fakelibvirt.VIR_DOMAIN_XML_SECURE)
+ mock_dom.blockRebase.assert_called_once_with(
+ srcfile, dstfile, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
+ mock_dom.blockResize.assert_called_once_with(
+ srcfile, 1 * units.Gi / units.Ki)
+ mock_define.assert_called_once_with(xmldoc)
+
+ # _live_snapshot flow: create a COW delta file backed by the disk's
+ # backing file, shallow-rebase the running disk onto the delta, extract
+ # the snapshot from the delta into the destination, then restore the
+ # original domain XML.
+ def test_live_snapshot(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ mock_dom = mock.MagicMock()
+
+ with contextlib.nested(
+ mock.patch.object(drvr._conn, 'defineXML', create=True),
+ mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
+ mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
+ mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
+ mock.patch.object(fake_libvirt_utils, 'chown'),
+ mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
+ ) as (mock_define, mock_size, mock_backing, mock_create_cow,
+ mock_chown, mock_snapshot):
+
+ xmldoc = "<domain/>"
+ srcfile = "/first/path"
+ dstfile = "/second/path"
+ bckfile = "/other/path"
+ dltfile = dstfile + ".delta"
+
+ mock_dom.XMLDesc.return_value = xmldoc
+ mock_dom.isPersistent.return_value = True
+ mock_size.return_value = 1004009
+ mock_backing.return_value = bckfile
+
+ drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2")
+
+ mock_dom.XMLDesc.assert_called_once_with(
+ fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
+ fakelibvirt.VIR_DOMAIN_XML_SECURE)
+ # SHALLOW rebase copies only the top layer into the delta.
+ mock_dom.blockRebase.assert_called_once_with(
+ srcfile, dltfile, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
+
+ mock_size.assert_called_once_with(srcfile)
+ mock_backing.assert_called_once_with(srcfile, basename=False)
+ mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_chown.assert_called_once_with(dltfile, os.getuid())
+ mock_snapshot.assert_called_once_with(dltfile, "qcow2",
+ dstfile, "qcow2")
+ mock_define.assert_called_once_with(xmldoc)
+
+ # A valid destination hostname lets live_migration spawn its worker
+ # greenthread exactly once.
+ @mock.patch.object(greenthread, "spawn")
+ def test_live_migration_hostname_valid(self, mock_spawn):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.live_migration(self.context, self.test_instance,
+ "host1.example.com",
+ lambda x: x,
+ lambda x: x)
+ self.assertEqual(1, mock_spawn.call_count)
+
+ # A hostname failing validation (e.g. shell metacharacters) must raise
+ # InvalidHostname before any migration work starts.
+ @mock.patch.object(greenthread, "spawn")
+ @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
+ def test_live_migration_hostname_invalid(self, mock_hostname, mock_spawn):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ mock_hostname.return_value = False
+ self.assertRaises(exception.InvalidHostname,
+ drvr.live_migration,
+ self.context, self.test_instance,
+ "foo/?com=/bin/sh",
+ lambda x: x,
+ lambda x: x)
+
+ # With raw file-backed images, check_instance_shared_storage_local
+ # creates a probe temp file under instances_path and returns its name.
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('os.close', return_value=None)
+ def test_check_instance_shared_storage_local_raw(self,
+ mock_close,
+ mock_mkstemp,
+ mock_exists):
+ instance_uuid = str(uuid.uuid4())
+ self.flags(images_type='raw', group='libvirt')
+ self.flags(instances_path='/tmp')
+ mock_mkstemp.return_value = (-1,
+ '/tmp/{0}/file'.format(instance_uuid))
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(self.context)
+ temp_file = driver.check_instance_shared_storage_local(self.context,
+ instance)
+ self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
+ temp_file['filename'])
+
+ # RBD-backed storage has no local instance dir to probe, so the check
+ # returns None (shared-storage detection is not applicable).
+ def test_check_instance_shared_storage_local_rbd(self):
+ self.flags(images_type='rbd', group='libvirt')
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertIsNone(driver.
+ check_instance_shared_storage_local(self.context,
+ instance))
+
+
+class HostStateTestCase(test.NoDBTestCase):
+ """Exercise get_available_resource() against a fully stubbed driver.
+
+ FakeConnection overrides every host-introspection helper with canned
+ values so the test can assert the exact resource dict the driver
+ assembles, without touching a real hypervisor.
+ """
+
+ # Canned host CPU description as the JSON string the driver emits.
+ cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
+ '"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
+ '"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
+ '"mtrr", "sep", "apic"], '
+ '"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
+ instance_caps = [(arch.X86_64, "kvm", "hvm"),
+ (arch.I686, "kvm", "hvm")]
+ pci_devices = [{
+ "dev_id": "pci_0000_04_00_3",
+ "address": "0000:04:10.3",
+ "product_id": '1521',
+ "vendor_id": '8086',
+ "dev_type": 'type-PF',
+ "phys_function": None}]
+ # Two-cell NUMA topology used to round-trip through the stats JSON.
+ numa_topology = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hardware.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+
+ class FakeConnection(libvirt_driver.LibvirtDriver):
+ """Fake connection object."""
+ def __init__(self):
+ super(HostStateTestCase.FakeConnection,
+ self).__init__(fake.FakeVirtAPI(), True)
+
+ def _get_vcpu_total(self):
+ return 1
+
+ def _get_vcpu_used(self):
+ return 0
+
+ def _get_cpu_info(self):
+ return HostStateTestCase.cpu_info
+
+ def _get_disk_over_committed_size_total(self):
+ return 0
+
+ def _get_local_gb_info(self):
+ return {'total': 100, 'used': 20, 'free': 80}
+
+ def _get_memory_mb_total(self):
+ return 497
+
+ def _get_memory_mb_used(self):
+ return 88
+
+ def _get_hypervisor_type(self):
+ return 'QEMU'
+
+ def _get_hypervisor_version(self):
+ return 13091
+
+ def _get_hypervisor_hostname(self):
+ return 'compute1'
+
+ def get_host_uptime(self):
+ return ('10:01:16 up 1:36, 6 users, '
+ 'load average: 0.21, 0.16, 0.19')
+
+ def _get_disk_available_least(self):
+ return 13091
+
+ def _get_instance_capabilities(self):
+ return HostStateTestCase.instance_caps
+
+ def _get_pci_passthrough_devices(self):
+ return jsonutils.dumps(HostStateTestCase.pci_devices)
+
+ def _get_host_numa_topology(self):
+ return HostStateTestCase.numa_topology
+
+ def test_update_status(self):
+ drvr = HostStateTestCase.FakeConnection()
+
+ stats = drvr.get_available_resource("compute1")
+ self.assertEqual(stats["vcpus"], 1)
+ self.assertEqual(stats["memory_mb"], 497)
+ self.assertEqual(stats["local_gb"], 100)
+ self.assertEqual(stats["vcpus_used"], 0)
+ self.assertEqual(stats["memory_mb_used"], 88)
+ self.assertEqual(stats["local_gb_used"], 20)
+ self.assertEqual(stats["hypervisor_type"], 'QEMU')
+ self.assertEqual(stats["hypervisor_version"], 13091)
+ self.assertEqual(stats["hypervisor_hostname"], 'compute1')
+ self.assertEqual(jsonutils.loads(stats["cpu_info"]),
+ {"vendor": "Intel", "model": "pentium",
+ "arch": arch.I686,
+ "features": ["ssse3", "monitor", "pni", "sse2", "sse",
+ "fxsr", "clflush", "pse36", "pat", "cmov",
+ "mca", "pge", "mtrr", "sep", "apic"],
+ "topology": {"cores": "1", "threads": "1", "sockets": "1"}
+ })
+ # NOTE(review): disk_available_least is expected to come from the
+ # local_gb free value (80), not _get_disk_available_least (13091).
+ self.assertEqual(stats["disk_available_least"], 80)
+ self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
+ HostStateTestCase.pci_devices)
+ self.assertThat(hardware.VirtNUMAHostTopology.from_json(
+ stats['numa_topology'])._to_dict(),
+ matchers.DictMatches(
+ HostStateTestCase.numa_topology._to_dict()))
+
+
+class LibvirtDriverTestCase(test.NoDBTestCase):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
+ def setUp(self):
+ super(LibvirtDriverTestCase, self).setUp()
+ # Read-only connection is sufficient: these tests stub out all
+ # operations that would need write access to libvirt.
+ self.libvirtconnection = libvirt_driver.LibvirtDriver(
+ fake.FakeVirtAPI(), read_only=True)
+ self.context = context.get_admin_context()
+
+ def _create_instance(self, params=None):
+ """Create a test instance."""
+ # params overrides any of the default fields below.
+ if not params:
+ params = {}
+
+ # Flavor data embedded as system metadata, mirroring how the
+ # compute manager stores instance_type_* at the time of writing.
+ sys_meta = {
+ 'instance_type_memory_mb': 512,
+ 'instance_type_swap': 0,
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_root_gb': 1,
+ 'instance_type_id': 2,
+ 'instance_type_name': u'm1.tiny',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': u'1',
+ 'instance_type_vcpus': 1
+ }
+
+ inst = {}
+ inst['id'] = 1
+ inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
+ inst['os_type'] = 'linux'
+ inst['image_ref'] = '1'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type_id'] = 2
+ inst['ami_launch_index'] = 0
+ inst['host'] = 'host1'
+ inst['root_gb'] = 10
+ inst['ephemeral_gb'] = 20
+ inst['config_drive'] = True
+ inst['kernel_id'] = 2
+ inst['ramdisk_id'] = 3
+ inst['key_data'] = 'ABCDEFG'
+ inst['system_metadata'] = sys_meta
+
+ inst.update(params)
+
+ return objects.Instance(**inst)
+
+ def test_migrate_disk_and_power_off_exception(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .migrate_disk_and_power_off.
+
+ The first utils.execute call fails (AssertionError), which must
+ propagate out of migrate_disk_and_power_off.
+ """
+
+ self.counter = 0
+ self.checked_shared_storage = False
+
+ def fake_get_instance_disk_info(instance,
+ block_device_info=None):
+ return '[]'
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ # Blow up on the very first command executed during migration.
+ def fake_execute(*args, **kwargs):
+ self.counter += 1
+ if self.counter == 1:
+ assert False, "intentional failure"
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_is_storage_shared(dest, inst_base):
+ self.checked_shared_storage = True
+ return False
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
+ fake_is_storage_shared)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ self.assertRaises(AssertionError,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ None, ins_ref, '10.0.0.2', flavor, None)
+
+ def test_migrate_disk_and_power_off(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .migrate_disk_and_power_off.
+
+ Happy path: the method returns the instance's disk_info JSON for
+ both a remote destination and the same-host case.
+ """
+
+ disk_info = [{'type': 'qcow2', 'path': '/test/disk',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/base/disk',
+ 'disk_size': '83886080'},
+ {'type': 'raw', 'path': '/test/disk.local',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/base/disk.local',
+ 'disk_size': '83886080'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+
+ def fake_get_instance_disk_info(instance,
+ block_device_info=None):
+ return disk_info_text
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ # dest is different host case
+ out = self.libvirtconnection.migrate_disk_and_power_off(
+ None, ins_ref, '10.0.0.2', flavor, None)
+ self.assertEqual(out, disk_info_text)
+
+ # dest is same host case
+ out = self.libvirtconnection.migrate_disk_and_power_off(
+ None, ins_ref, '10.0.0.1', flavor, None)
+ self.assertEqual(out, disk_info_text)
+
+ @mock.patch('nova.utils.execute')
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.get_instance_disk_info')
+ def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
+ get_host_ip_addr,
+ mock_destroy,
+ mock_copy_image,
+ mock_execute):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .migrate_disk_and_power_off.
+
+ The swap disk is regenerated on the destination, so migration must
+ neither copy nor move disk.swap.
+ """
+ self.copy_or_move_swap_called = False
+
+ # 10G root and 512M swap disk
+ disk_info = [{'disk_size': 1, 'type': 'qcow2',
+ 'virt_disk_size': 10737418240, 'path': '/test/disk',
+ 'backing_file': '/base/disk'},
+ {'disk_size': 1, 'type': 'qcow2',
+ 'virt_disk_size': 536870912, 'path': '/test/disk.swap',
+ 'backing_file': '/base/swap_512'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+ mock_get_disk_info.return_value = disk_info_text
+ get_host_ip_addr.return_value = '10.0.0.1'
+
+ def fake_copy_image(*args, **kwargs):
+ # disk.swap should not be touched since it is skipped over
+ if '/test/disk.swap' in list(args):
+ self.copy_or_move_swap_called = True
+
+ def fake_execute(*args, **kwargs):
+ # disk.swap should not be touched since it is skipped over
+ if set(['mv', '/test/disk.swap']).issubset(list(args)):
+ self.copy_or_move_swap_called = True
+
+ mock_copy_image.side_effect = fake_copy_image
+ mock_execute.side_effect = fake_execute
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Original instance config
+ instance = self._create_instance({'root_gb': 10,
+ 'ephemeral_gb': 0})
+
+ # Re-size fake instance to 20G root and 1024M swap disk
+ flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
+
+ # Destination is same host
+ out = conn.migrate_disk_and_power_off(None, instance, '10.0.0.1',
+ flavor, None)
+
+ mock_get_disk_info.assert_called_once_with(instance.name,
+ block_device_info=None)
+ self.assertTrue(get_host_ip_addr.called)
+ mock_destroy.assert_called_once_with(instance)
+ self.assertFalse(self.copy_or_move_swap_called)
+ self.assertEqual(disk_info_text, out)
+
+ def test_migrate_disk_and_power_off_lvm(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
+ .migrate_disk_and_power_off.
+
+ LVM-backed instances cannot be resized/migrated this way, so the
+ driver must fail the pre-check rather than attempt the migration.
+ """
+
+ self.flags(images_type='lvm', group='libvirt')
+ disk_info = [{'type': 'raw', 'path': '/dev/vg/disk',
+ 'disk_size': '83886080'},
+ {'type': 'raw', 'path': '/dev/disk.local',
+ 'disk_size': '83886080'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
+ return disk_info_text
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ # Migration is not implemented for LVM backed instances
+ self.assertRaises(exception.MigrationPreCheckError,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ None, ins_ref, '10.0.0.1', flavor, None)
+
+ # Shrinking the root disk (10G -> 5G flavor) is not allowed and must
+ # be surfaced as InstanceFaultRollback.
+ def test_migrate_disk_and_power_off_resize_error(self):
+ instance = self._create_instance()
+ flavor = {'root_gb': 5}
+ self.assertRaises(
+ exception.InstanceFaultRollback,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ 'ctx', instance, '10.0.0.1', flavor, None)
+
+ # _wait_for_running: missing instance raises InstanceNotFound, a
+ # RUNNING instance ends the looping call via LoopingCallDone, and
+ # any other state simply returns so the loop keeps polling.
+ def test_wait_for_running(self):
+ def fake_get_info(instance):
+ if instance['name'] == "not_found":
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ elif instance['name'] == "running":
+ return {'state': power_state.RUNNING}
+ else:
+ return {'state': power_state.SHUTDOWN}
+
+ self.stubs.Set(self.libvirtconnection, 'get_info',
+ fake_get_info)
+
+ # instance not found case
+ self.assertRaises(exception.InstanceNotFound,
+ self.libvirtconnection._wait_for_running,
+ {'name': 'not_found',
+ 'uuid': 'not_found_uuid'})
+
+ # instance is running case
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.libvirtconnection._wait_for_running,
+ {'name': 'running',
+ 'uuid': 'running_uuid'})
+
+ # else case
+ self.libvirtconnection._wait_for_running({'name': 'else',
+ 'uuid': 'other_uuid'})
+
+ # _disk_size_from_instance maps disk paths to flavor sizes: the bare
+ # root disk -> root_gb, '.local' -> ephemeral_gb, '.swap' -> 0 (swap
+ # is regenerated rather than resized).
+ def test_disk_size_from_instance_disk_info(self):
+ inst = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
+
+ info = {'path': '/path/disk'}
+ self.assertEqual(10 * units.Gi,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ info = {'path': '/path/disk.local'}
+ self.assertEqual(20 * units.Gi,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ info = {'path': '/path/disk.swap'}
+ self.assertEqual(0,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ # raw -> qcow2 conversion happens via a temp '_qcow' file which is
+ # then moved over the original path.
+ @mock.patch('nova.utils.execute')
+ def test_disk_raw_to_qcow2(self, mock_execute):
+ path = '/test/disk'
+ _path_qcow = path + '_qcow'
+
+ self.libvirtconnection._disk_raw_to_qcow2(path)
+ mock_execute.assert_has_calls([
+ mock.call('qemu-img', 'convert', '-f', 'raw',
+ '-O', 'qcow2', path, _path_qcow),
+ mock.call('mv', _path_qcow, path)])
+
+ # Mirror of the above: qcow2 -> raw via a temp '_raw' file.
+ @mock.patch('nova.utils.execute')
+ def test_disk_qcow2_to_raw(self, mock_execute):
+ path = '/test/disk'
+ _path_raw = path + '_raw'
+
+ self.libvirtconnection._disk_qcow2_to_raw(path)
+ mock_execute.assert_has_calls([
+ mock.call('qemu-img', 'convert', '-f', 'qcow2',
+ '-O', 'raw', path, _path_raw),
+ mock.call('mv', _path_raw, path)])
+
+ # Raw disks are extended in place without COW handling.
+ @mock.patch('nova.virt.disk.api.extend')
+ def test_disk_resize_raw(self, mock_extend):
+ info = {'type': 'raw', 'path': '/test/disk'}
+
+ self.libvirtconnection._disk_resize(info, 50)
+ mock_extend.assert_called_once_with(info['path'], 50, use_cow=False)
+
+ # qcow2 resize path: when the image is resizable and partitionless the
+ # driver converts qcow2 -> raw, extends the raw file, then converts
+ # back to qcow2.
+ @mock.patch('nova.virt.disk.api.can_resize_image')
+ @mock.patch('nova.virt.disk.api.is_image_partitionless')
+ @mock.patch('nova.virt.disk.api.extend')
+ def test_disk_resize_qcow2(
+ self, mock_extend, mock_can_resize, mock_is_partitionless):
+ info = {'type': 'qcow2', 'path': '/test/disk'}
+
+ with contextlib.nested(
+ mock.patch.object(
+ self.libvirtconnection, '_disk_qcow2_to_raw'),
+ mock.patch.object(
+ self.libvirtconnection, '_disk_raw_to_qcow2'))\
+ as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):
+
+ mock_can_resize.return_value = True
+ mock_is_partitionless.return_value = True
+
+ self.libvirtconnection._disk_resize(info, 50)
+
+ mock_disk_qcow2_to_raw.assert_called_once_with(info['path'])
+ mock_extend.assert_called_once_with(
+ info['path'], 50, use_cow=False)
+ mock_disk_raw_to_qcow2.assert_called_once_with(info['path'])
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .finish_migration.
+
+ Parameterized on power_on (expected power_on flag passed through to
+ domain creation) and resize_instance (whether _disk_resize must be
+ invoked). Also checks that file injection is disabled and that VIFs
+ are reported as already plugged.
+ """
+
+ disk_info = [{'type': 'qcow2', 'path': '/test/disk',
+ 'local_gb': 10, 'backing_file': '/base/disk'},
+ {'type': 'raw', 'path': '/test/disk.local',
+ 'local_gb': 10, 'backing_file': '/base/disk.local'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+ powered_on = power_on
+ self.fake_create_domain_called = False
+ self.fake_disk_resize_called = False
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None, write_to_disk=False):
+ return ""
+
+ def fake_plug_vifs(instance, network_info):
+ pass
+
+ def fake_create_image(context, inst,
+ disk_mapping, suffix='',
+ disk_images=None, network_info=None,
+ block_device_info=None, inject_files=True):
+ # finish_migration must never inject files into migrated disks.
+ self.assertFalse(inject_files)
+
+ def fake_create_domain_and_network(
+ context, xml, instance, network_info,
+ block_device_info=None, power_on=True, reboot=False,
+ vifs_already_plugged=False):
+ self.fake_create_domain_called = True
+ self.assertEqual(powered_on, power_on)
+ self.assertTrue(vifs_already_plugged)
+
+ def fake_enable_hairpin(instance):
+ pass
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ def fake_get_info(instance):
+ if powered_on:
+ return {'state': power_state.RUNNING}
+ else:
+ return {'state': power_state.SHUTDOWN}
+
+ def fake_disk_resize(info, size):
+ self.fake_disk_resize_called = True
+
+ self.flags(use_cow_images=True)
+ self.stubs.Set(self.libvirtconnection, '_disk_resize',
+ fake_disk_resize)
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
+ self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
+ self.stubs.Set(self.libvirtconnection, '_create_image',
+ fake_create_image)
+ self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
+ fake_create_domain_and_network)
+ self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
+ fake_enable_hairpin)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ fw = base_firewall.NoopFirewallDriver()
+ self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
+ self.stubs.Set(self.libvirtconnection, 'get_info',
+ fake_get_info)
+
+ ins_ref = self._create_instance()
+
+ self.libvirtconnection.finish_migration(
+ context.get_admin_context(), None, ins_ref,
+ disk_info_text, [], None,
+ resize_instance, None, power_on)
+ self.assertTrue(self.fake_create_domain_called)
+ self.assertEqual(
+ resize_instance, self.fake_disk_resize_called)
+
+ def test_finish_migration_resize(self):
+ self._test_finish_migration(True, resize_instance=True)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(False)
+
+ def _test_finish_revert_migration(self, power_on):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .finish_revert_migration.
+
+ Verifies the reverted domain is created with the requested power_on
+ flag, using a real instance directory laid out in a tempdir.
+ """
+ powered_on = power_on
+ self.fake_create_domain_called = False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ def fake_plug_vifs(instance, network_info):
+ pass
+
+ def fake_create_domain(xml, instance=None, launch_flags=0,
+ power_on=True):
+ self.fake_create_domain_called = True
+ self.assertEqual(powered_on, power_on)
+ return mock.MagicMock()
+
+ def fake_enable_hairpin(instance):
+ pass
+
+ def fake_get_info(instance):
+ if powered_on:
+ return {'state': power_state.RUNNING}
+ else:
+ return {'state': power_state.SHUTDOWN}
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None):
+ return ""
+
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
+ self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ fw = base_firewall.NoopFirewallDriver()
+ self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
+ self.stubs.Set(self.libvirtconnection, '_create_domain',
+ fake_create_domain)
+ self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
+ fake_enable_hairpin)
+ self.stubs.Set(self.libvirtconnection, 'get_info',
+ fake_get_info)
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ ins_ref = self._create_instance()
+ os.mkdir(os.path.join(tmpdir, ins_ref['name']))
+ libvirt_xml_path = os.path.join(tmpdir,
+ ins_ref['name'],
+ 'libvirt.xml')
+ # Create an empty libvirt.xml so the revert path finds one.
+ f = open(libvirt_xml_path, 'w')
+ f.close()
+
+ self.libvirtconnection.finish_revert_migration(
+ context.get_admin_context(), ins_ref,
+ [], None, power_on)
+ self.assertTrue(self.fake_create_domain_called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(False)
+
+ def _test_finish_revert_migration_after_crash(self, backup_made=True,
+ del_inst_failed=False):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(shutil, 'rmtree')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml',
+ lambda *a, **k: None)
+ self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
+ lambda *a: None)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+
+ libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
+ os.path.exists('/fake/foo_resize').AndReturn(backup_made)
+ if backup_made:
+ if del_inst_failed:
+ os_error = OSError(errno.ENOENT, 'No such file or directory')
+ shutil.rmtree('/fake/foo').AndRaise(os_error)
+ else:
+ shutil.rmtree('/fake/foo')
+ utils.execute('mv', '/fake/foo_resize', '/fake/foo')
+
+ self.mox.ReplayAll()
+
+ self.libvirtconnection.finish_revert_migration(context, {}, [])
+
+ def test_finish_revert_migration_after_crash(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True)
+
+ def test_finish_revert_migration_after_crash_before_new(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True)
+
+ def test_finish_revert_migration_after_crash_before_backup(self):
+ self._test_finish_revert_migration_after_crash(backup_made=False)
+
def test_finish_revert_migration_after_crash_delete_failed(self):
    """rmtree of the stale instance dir failing must not abort revert."""
    self._test_finish_revert_migration_after_crash(backup_made=True,
                                                   del_inst_failed=True)
+
def test_cleanup_failed_migration(self):
    """_cleanup_failed_migration removes the given instance directory."""
    self.mox.StubOutWithMock(shutil, 'rmtree')
    shutil.rmtree('/fake/inst')
    self.mox.ReplayAll()
    self.libvirtconnection._cleanup_failed_migration('/fake/inst')
+
def test_confirm_migration(self):
    """confirm_migration delegates to _cleanup_resize."""
    ins_ref = self._create_instance()

    self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
    self.libvirtconnection._cleanup_resize(ins_ref,
                                           _fake_network_info(self.stubs, 1))

    self.mox.ReplayAll()
    self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
                                             _fake_network_info(self.stubs, 1))
+
def test_cleanup_resize_same_host(self):
    """On the same host only the *_resize dir is removed; the domain stays."""
    CONF.set_override('policy_dirs', [])
    ins_ref = self._create_instance({'host': CONF.host})

    def fake_os_path_exists(path):
        return True

    self.stubs.Set(os.path, 'exists', fake_os_path_exists)

    self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(utils, 'execute')

    libvirt_utils.get_instance_path(ins_ref,
                                    forceold=True).AndReturn('/fake/inst')
    utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                  attempts=5)

    self.mox.ReplayAll()
    self.libvirtconnection._cleanup_resize(ins_ref,
                                           _fake_network_info(self.stubs, 1))
+
def test_cleanup_resize_not_same_host(self):
    """When the instance moved hosts, the old domain/vifs/filters
    are also torn down in addition to removing the *_resize dir."""
    CONF.set_override('policy_dirs', [])
    host = 'not' + CONF.host
    ins_ref = self._create_instance({'host': host})

    def fake_os_path_exists(path):
        return True

    def fake_undefine_domain(instance):
        pass

    def fake_unplug_vifs(instance, network_info, ignore_errors=False):
        pass

    def fake_unfilter_instance(instance, network_info):
        pass

    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    self.stubs.Set(self.libvirtconnection, '_undefine_domain',
                   fake_undefine_domain)
    self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
                   fake_unplug_vifs)
    self.stubs.Set(self.libvirtconnection.firewall_driver,
                   'unfilter_instance', fake_unfilter_instance)

    self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(utils, 'execute')

    libvirt_utils.get_instance_path(ins_ref,
                                    forceold=True).AndReturn('/fake/inst')
    utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                  attempts=5)

    self.mox.ReplayAll()
    self.libvirtconnection._cleanup_resize(ins_ref,
                                           _fake_network_info(self.stubs, 1))
+
def test_get_instance_disk_info_exception(self):
    """A libvirtError from XMLDesc is surfaced as InstanceNotFound."""
    instance_name = "fake-instance-name"

    class FakeExceptionDomain(FakeVirtDomain):
        def __init__(self):
            super(FakeExceptionDomain, self).__init__()

        def XMLDesc(self, *args):
            raise libvirt.libvirtError("Libvirt error")

    def fake_lookup_by_name(instance_name):
        return FakeExceptionDomain()

    self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
                   fake_lookup_by_name)
    self.assertRaises(exception.InstanceNotFound,
                      self.libvirtconnection.get_instance_disk_info,
                      instance_name)
+
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.lvm.list_volumes')
def test_lvm_disks(self, listlvs, exists):
    """Only LVs whose name starts with the instance uuid are returned."""
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.flags(images_volume_group='vols', group='libvirt')
    exists.return_value = True
    listlvs.return_value = ['fake-uuid_foo',
                            'other-uuid_foo']
    disks = self.libvirtconnection._lvm_disks(instance)
    self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
+
def test_is_booted_from_volume(self):
    """An instance with no image_ref is considered booted from volume."""
    is_booted = libvirt_driver.LibvirtDriver._is_booted_from_volume

    instance = {}
    disk_mapping = {}
    # No image_ref at all -> treated as volume-booted.
    self.assertTrue(is_booted(instance, disk_mapping))

    # A 'disk' mapping alone does not change that.
    disk_mapping['disk'] = 'map'
    self.assertTrue(is_booted(instance, disk_mapping))

    # As soon as an image_ref appears it is image-booted.
    instance['image_ref'] = 'uuid'
    self.assertFalse(is_booted(instance, disk_mapping))
+
@mock.patch('nova.virt.netutils.get_injected_network_template')
@mock.patch('nova.virt.disk.api.inject_data')
def _test_inject_data(self, driver_params, disk_params,
                      disk_inject_data, inj_network,
                      called=True):
    """Drive LibvirtDriver._inject_data and check the disk-API call.

    :param driver_params: kwargs passed to _inject_data.
    :param disk_params: positional args expected on
        nova.virt.disk.api.inject_data; disk_params[0] is the image
        path and doubles as the fake backend's path.
    :param called: whether inject_data is expected to be invoked.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    # Minimal image-backend stand-in; '/fail/path' simulates a
    # missing backing image.
    class ImageBackend(object):
        path = '/path'

        def check_image_exists(self):
            if self.path == '/fail/path':
                return False
            return True

    def fake_inj_network(*args, **kwds):
        return args[0] or None
    inj_network.side_effect = fake_inj_network

    image_backend = ImageBackend()
    image_backend.path = disk_params[0]

    with mock.patch.object(
            conn.image_backend,
            'image',
            return_value=image_backend):
        self.flags(inject_partition=0, group='libvirt')

        conn._inject_data(**driver_params)

        if called:
            disk_inject_data.assert_called_once_with(
                *disk_params,
                partition=None, mandatory=('files',), use_cow=True)

        self.assertEqual(disk_inject_data.called, called)
+
def _test_inject_data_default_driver_params(self):
    """Build the baseline kwargs used by the _inject_data test cases."""
    instance = {
        'uuid': 'fake-uuid',
        'id': 1,
        'kernel_id': None,
        'image_ref': 1,
        'key_data': None,
        'metadata': None,
    }
    return {
        'instance': instance,
        'network_info': None,
        'admin_pass': None,
        'files': None,
        'suffix': '',
    }
+
def test_inject_data_adminpass(self):
    """Admin password is injected only when inject_password is enabled."""
    self.flags(inject_password=True, group='libvirt')
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['admin_pass'] = 'foobar'
    disk_params = [
        '/path',  # injection_path
        None,  # key
        None,  # net
        None,  # metadata
        'foobar',  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, disk_params)

    # Test with the configuration set to false.
    self.flags(inject_password=False, group='libvirt')
    self._test_inject_data(driver_params, disk_params, called=False)
+
def test_inject_data_key(self):
    """SSH key is injected only when inject_key is enabled."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['instance']['key_data'] = 'key-content'

    self.flags(inject_key=True, group='libvirt')
    disk_params = [
        '/path',  # injection_path
        'key-content',  # key
        None,  # net
        None,  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, disk_params)

    # Test with the configuration set to false.
    self.flags(inject_key=False, group='libvirt')
    self._test_inject_data(driver_params, disk_params, called=False)
+
def test_inject_data_metadata(self):
    """Instance metadata is passed through to disk.api.inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['instance']['metadata'] = 'data'
    disk_params = [
        '/path',  # injection_path
        None,  # key
        None,  # net
        'data',  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, disk_params)
+
def test_inject_data_files(self):
    """Injected files list is passed through to disk.api.inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['files'] = ['file1', 'file2']
    disk_params = [
        '/path',  # injection_path
        None,  # key
        None,  # net
        None,  # metadata
        None,  # admin_pass
        ['file1', 'file2'],  # files
    ]
    self._test_inject_data(driver_params, disk_params)
+
def test_inject_data_net(self):
    """Network info is passed through to disk.api.inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['network_info'] = {'net': 'eno1'}
    disk_params = [
        '/path',  # injection_path
        None,  # key
        {'net': 'eno1'},  # net
        None,  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, disk_params)
+
def test_inject_not_exist_image(self):
    """No injection happens when the backing image does not exist."""
    driver_params = self._test_inject_data_default_driver_params()
    disk_params = [
        '/fail/path',  # injection_path
        'key-content',  # key
        None,  # net
        None,  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, disk_params, called=False)
+
def _test_attach_detach_interface(self, method, power_state,
                                  expected_flags):
    """Verify attach_interface/detach_interface device-flag handling.

    :param method: 'attach_interface' or 'detach_interface'.
    :param power_state: domain power state reported by domain.info().
    :param expected_flags: flags expected on
        attachDeviceFlags/detachDeviceFlags for that power state.
    """
    instance = self._create_instance()
    network_info = _fake_network_info(self.stubs, 1)
    domain = FakeVirtDomain()
    self.mox.StubOutWithMock(self.libvirtconnection, '_lookup_by_name')
    self.mox.StubOutWithMock(self.libvirtconnection.firewall_driver,
                             'setup_basic_filtering')
    self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
    self.mox.StubOutWithMock(domain, 'info')
    self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')

    self.libvirtconnection._lookup_by_name(
        'instance-00000001').AndReturn(domain)
    if method == 'attach_interface':
        # Only attach sets up firewall filtering for the new vif.
        self.libvirtconnection.firewall_driver.setup_basic_filtering(
            instance, [network_info[0]])

    fake_flavor = instance.get_flavor()

    objects.Flavor.get_by_id(mox.IgnoreArg(), 2).AndReturn(fake_flavor)

    if method == 'attach_interface':
        fake_image_meta = {'id': instance['image_ref']}
    elif method == 'detach_interface':
        fake_image_meta = None
    # Capture the real vif config before stubbing get_config, so the
    # stub can return the genuine expected XML.
    expected = self.libvirtconnection.vif_driver.get_config(
        instance, network_info[0], fake_image_meta, fake_flavor,
        CONF.libvirt.virt_type)

    self.mox.StubOutWithMock(self.libvirtconnection.vif_driver,
                             'get_config')
    self.libvirtconnection.vif_driver.get_config(
        instance, network_info[0],
        fake_image_meta,
        mox.IsA(objects.Flavor),
        CONF.libvirt.virt_type).AndReturn(expected)
    domain.info().AndReturn([power_state])
    if method == 'attach_interface':
        domain.attachDeviceFlags(expected.to_xml(), expected_flags)
    elif method == 'detach_interface':
        domain.detachDeviceFlags(expected.to_xml(), expected_flags)

    self.mox.ReplayAll()
    if method == 'attach_interface':
        self.libvirtconnection.attach_interface(
            instance, fake_image_meta, network_info[0])
    elif method == 'detach_interface':
        self.libvirtconnection.detach_interface(
            instance, network_info[0])
    self.mox.VerifyAll()
+
def test_attach_interface_with_running_instance(self):
    """Running domain: attach affects both live and persisted config."""
    self._test_attach_detach_interface(
        'attach_interface', power_state.RUNNING,
        expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
def test_attach_interface_with_pause_instance(self):
    """Paused domain: attach still affects live and persisted config."""
    self._test_attach_detach_interface(
        'attach_interface', power_state.PAUSED,
        expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
def test_attach_interface_with_shutdown_instance(self):
    """Shut-down domain: attach only touches the persisted config."""
    self._test_attach_detach_interface(
        'attach_interface', power_state.SHUTDOWN,
        expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+
def test_detach_interface_with_running_instance(self):
    """Running domain: detach affects both live and persisted config."""
    self._test_attach_detach_interface(
        'detach_interface', power_state.RUNNING,
        expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
def test_detach_interface_with_pause_instance(self):
    """Paused domain: detach still affects live and persisted config."""
    self._test_attach_detach_interface(
        'detach_interface', power_state.PAUSED,
        expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
def test_detach_interface_with_shutdown_instance(self):
    """Shut-down domain: detach only touches the persisted config."""
    self._test_attach_detach_interface(
        'detach_interface', power_state.SHUTDOWN,
        expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+
def test_rescue(self):
    """Rescue without a config drive: destroy domain, cache rescue
    kernel/ramdisk/disk images and recreate the domain from the
    rescue XML."""
    instance = self._create_instance({'config_drive': None})
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    network_info = _fake_network_info(self.stubs, 1)

    self.mox.StubOutWithMock(self.libvirtconnection,
                             '_get_existing_domain_xml')
    self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
    self.mox.StubOutWithMock(imagebackend.Backend, 'image')
    self.mox.StubOutWithMock(imagebackend.Image, 'cache')
    self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
    self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
    self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')

    self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
    # unrescue.xml is written twice (with and without a path suffix arg).
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())
    imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             user_id=mox.IgnoreArg()).MultipleTimes()

    # The root rescue disk cache call additionally passes size=None.
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             size=None, user_id=mox.IgnoreArg())

    image_meta = {'id': 'fake', 'name': 'fake'}
    self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
                                network_info, mox.IgnoreArg(),
                                image_meta, rescue=mox.IgnoreArg(),
                                write_to_disk=mox.IgnoreArg()
                                ).AndReturn(dummyxml)

    self.libvirtconnection._destroy(instance)
    self.libvirtconnection._create_domain(mox.IgnoreArg())

    self.mox.ReplayAll()

    rescue_password = 'fake_password'

    self.libvirtconnection.rescue(self.context, instance,
                network_info, image_meta, rescue_password)
    self.mox.VerifyAll()
+
def test_rescue_config_drive(self):
    """Rescue with a config drive: in addition to the plain rescue
    flow, a disk.config.rescue config drive is built for the guest."""
    instance = self._create_instance()
    uuid = instance.uuid
    configdrive_path = uuid + '/disk.config.rescue'
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    network_info = _fake_network_info(self.stubs, 1)

    self.mox.StubOutWithMock(self.libvirtconnection,
                             '_get_existing_domain_xml')
    self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
    self.mox.StubOutWithMock(imagebackend.Backend, 'image')
    self.mox.StubOutWithMock(imagebackend.Image, 'cache')
    self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                             '__init__')
    self.mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
    self.mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
    self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
    self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
    self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')

    self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())

    imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                               ).AndReturn(fake_imagebackend.Raw())

    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             user_id=mox.IgnoreArg()).MultipleTimes()

    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             size=None, user_id=mox.IgnoreArg())

    instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
                                                content=mox.IgnoreArg(),
                                                extra_md=mox.IgnoreArg(),
                                                network_info=mox.IgnoreArg())
    cdb = self.mox.CreateMockAnything()
    m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
    m.AndReturn(cdb)
    # __enter__ and __exit__ are required by "with"
    cdb.__enter__().AndReturn(cdb)
    cdb.make_drive(mox.Regex(configdrive_path))
    cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()
                 ).AndReturn(None)
    image_meta = {'id': 'fake', 'name': 'fake'}
    self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
                                network_info, mox.IgnoreArg(),
                                image_meta, rescue=mox.IgnoreArg(),
                                write_to_disk=mox.IgnoreArg()
                                ).AndReturn(dummyxml)
    self.libvirtconnection._destroy(instance)
    self.libvirtconnection._create_domain(mox.IgnoreArg())

    self.mox.ReplayAll()

    rescue_password = 'fake_password'

    self.libvirtconnection.rescue(self.context, instance, network_info,
                                  image_meta, rescue_password)
    self.mox.VerifyAll()
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files(self, get_instance_path, exists, exe,
                               shutil):
    """Happy path: dir is renamed to *_del, removed, and gone afterwards."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # exists checks: /path, /path_resize, /path_del (pre-rmtree),
    # /path_del (post-rmtree).
    exists.side_effect = [False, False, True, False]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resize(self, get_instance_path, exists,
                                      exe, shutil):
    """First mv fails so the *_resize dir is moved instead."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # 'nova.utils.execute' is the same mock object as 'exe';
    # setting side_effect here configures the patched execute.
    nova.utils.execute.side_effect = [Exception(), None]
    exists.side_effect = [False, False, True, False]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')]
    self.assertEqual(expected, exe.mock_calls)
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
                                      shutil):
    """*_del dir still present after rmtree -> deletion reported failed."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # Final exists('/path_del') returns True, so cleanup failed.
    exists.side_effect = [False, False, True, True]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    shutil.assert_called_with('/path_del')
    self.assertFalse(result)
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
                                         exe, shutil):
    """Every mv fails and the dirs remain -> both mv pairs are retried
    and the deletion is reported failed."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # Same mock as 'exe'; every execute call raises.
    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [True, True]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertFalse(result)
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resume(self, get_instance_path, exists,
                                      exe, shutil):
    """mv fails but the *_del dir exists anyway; rmtree finishes the job."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # Same mock as 'exe'; every execute call raises.
    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [False, False, True, False]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertTrue(result)
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_none(self, get_instance_path, exists,
                                    exe, shutil):
    """No directories exist at all: nothing to rmtree, success."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # Same mock as 'exe'; every execute call raises.
    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [False, False, False, False]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertEqual(0, len(shutil.mock_calls))
    self.assertTrue(result)
+
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_concurrent(self, get_instance_path, exists,
                                          exe, shutil):
    """Concurrent deleter interferes: first two mv attempts fail,
    the retried mv of /path succeeds, then rmtree cleans up."""
    lv = self.libvirtconnection
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)

    # Same mock as 'exe': fail, fail, then succeed.
    nova.utils.execute.side_effect = [Exception(), Exception(), None]
    exists.side_effect = [False, False, True, False]

    result = lv.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')]
    # Third call is a retry of the first mv.
    expected.append(expected[0])
    self.assertEqual(expected, exe.mock_calls)
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
+
def _assert_on_id_map(self, idmap, klass, start, target, count):
    """Assert an idmap's concrete type and its start/target/count values."""
    self.assertIsInstance(idmap, klass)
    for want, got in ((start, idmap.start),
                      (target, idmap.target),
                      (count, idmap.count)):
        self.assertEqual(want, got)
+
def test_get_id_maps(self):
    """LXC uid and gid maps each yield one config object per entry."""
    self.flags(virt_type="lxc", group="libvirt")
    # NOTE(review): direct CONF assignment duplicates self.flags above
    # and bypasses fixture cleanup — presumably historical; confirm.
    CONF.libvirt.virt_type = "lxc"
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    idmaps = conn._get_guest_idmaps()

    self.assertEqual(len(idmaps), 4)
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestUIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestUIDMap,
                           1, 20000, 10)
    self._assert_on_id_map(idmaps[2],
                           vconfig.LibvirtConfigGuestGIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[3],
                           vconfig.LibvirtConfigGuestGIDMap,
                           1, 20000, 10)
+
def test_get_id_maps_not_lxc(self):
    """Without virt_type=lxc no idmaps are produced at all."""
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    idmaps = conn._get_guest_idmaps()

    self.assertEqual(0, len(idmaps))
+
def test_get_id_maps_only_uid(self):
    """Only uid maps configured -> only UID map objects returned."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = []
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    idmaps = conn._get_guest_idmaps()

    self.assertEqual(2, len(idmaps))
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestUIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestUIDMap,
                           1, 20000, 10)
+
def test_get_id_maps_only_gid(self):
    """Only gid maps configured -> only GID map objects returned."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.uid_maps = []
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    idmaps = conn._get_guest_idmaps()

    self.assertEqual(2, len(idmaps))
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestGIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestGIDMap,
                           1, 20000, 10)
+
def test_instance_on_disk(self):
    """Default (file-backed) storage is not shared across hosts."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.assertFalse(conn.instance_on_disk(instance))
+
def test_instance_on_disk_rbd(self):
    """RBD-backed images are reachable from any host."""
    self.flags(images_type='rbd', group='libvirt')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.assertTrue(conn.instance_on_disk(instance))
+
@mock.patch("nova.objects.Flavor.get_by_id")
@mock.patch("nova.compute.utils.get_image_metadata")
def test_prepare_args_for_get_config(self, mock_image, mock_get):
    """Flavor lookup must use a read_deleted='yes' context so config
    can still be built for instances of deleted flavors."""
    instance = self._create_instance()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    # mock_get is Flavor.get_by_id (outermost decorator / last arg).
    def fake_get_by_id(context, id):
        self.assertEqual('yes', context.read_deleted)

    mock_get.side_effect = fake_get_by_id

    conn._prepare_args_for_get_config(self.context, instance)

    mock_get.assert_called_once_with(self.context,
                                     instance['instance_type_id'])
+
+
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )

        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """Per-volume stats come from block_stats for every bdm."""
        # NOTE: Python 2 long literals ('L' suffix) — this is py2-era code.
        def fake_block_stats(instance_name, disk):
            return (169L, 688640L, 0L, 0L, -1L)

        self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
        vol_usage = self.conn.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640L, 'wr_req': 0L,
                           'flush_operations': -1L, 'rd_req': 169L,
                           'wr_bytes': 0L},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640L, 'wr_req': 0L,
                           'flush_operations': -1L, 'rd_req': 169L,
                           'wr_bytes': 0L}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        """A libvirtError during domain lookup yields an empty result."""
        def fake_lookup(instance_name):
            raise libvirt.libvirtError('invalid path')

        self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
        vol_usage = self.conn.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
+
+
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840: the raw connection must be serializable
        # without blowing up.
        import nova.virt.libvirt.driver as libvirt_driver
        connection = libvirt_driver.LibvirtDriver('')
        connection.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(connection._conn, convert_instances=True)

    def test_tpool_execute_calls_libvirt(self):
        """All libvirt calls must be routed through eventlet.tpool so
        they cannot block the event loop."""
        conn = libvirt.virConnect()
        conn.is_expected = True

        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            libvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        # Only expected on libvirt versions exposing close callbacks.
        if hasattr(libvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertEqual(True, c.is_expected)
+
+
+class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
+ """Tests for libvirtDriver.volume_snapshot_create/delete."""
+
def setUp(self):
    """Prepare driver, fake instance, domain XML fixtures and the
    create/delete info dicts used by the snapshot tests.

    NOTE(review): XML indentation below is reconstructed — the
    extraction collapsed whitespace; string content is unchanged.
    """
    super(LibvirtVolumeSnapshotTestCase, self).setUp()

    self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.c = context.get_admin_context()

    self.flags(instance_name_template='instance-%s')
    self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

    # creating instance
    self.inst = {}
    self.inst['uuid'] = uuidutils.generate_uuid()
    self.inst['id'] = '1'

    # create domain info
    self.dom_xml = """
      <domain type='kvm'>
        <devices>
          <disk type='file'>
            <source file='disk1_file'/>
            <target dev='vda' bus='virtio'/>
            <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
          </disk>
          <disk type='block'>
            <source dev='/path/to/dev/1'/>
            <target dev='vdb' bus='virtio' serial='1234'/>
          </disk>
        </devices>
      </domain>"""

    # alternate domain info with network-backed snapshot chain
    self.dom_netdisk_xml = """
      <domain type='kvm'>
        <devices>
          <disk type='file'>
            <source file='disk1_file'/>
            <target dev='vda' bus='virtio'/>
            <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
          </disk>
          <disk type='network' device='disk'>
            <driver name='qemu' type='qcow2'/>
            <source protocol='gluster' name='vol1/root.img'>
              <host name='server1' port='24007'/>
            </source>
            <backingStore type='network' index='1'>
              <driver name='qemu' type='qcow2'/>
              <source protocol='gluster' name='vol1/snap.img'>
                <host name='server1' port='24007'/>
              </source>
              <backingStore type='network' index='2'>
                <driver name='qemu' type='qcow2'/>
                <source protocol='gluster' name='vol1/snap-b.img'>
                  <host name='server1' port='24007'/>
                </source>
                <backingStore/>
              </backingStore>
            </backingStore>
            <target dev='vdb' bus='virtio'/>
            <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
          </disk>
        </devices>
      </domain>
      """

    self.create_info = {'type': 'qcow2',
                        'snapshot_id': '1234-5678',
                        'new_file': 'new-file'}

    self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
    self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

    self.delete_info_1 = {'type': 'qcow2',
                          'file_to_merge': 'snap.img',
                          'merge_target_file': None}

    self.delete_info_2 = {'type': 'qcow2',
                          'file_to_merge': 'snap.img',
                          'merge_target_file': 'other-snap.img'}

    self.delete_info_netdisk = {'type': 'qcow2',
                                'file_to_merge': 'snap.img',
                                'merge_target_file': 'root.img'}

    self.delete_info_invalid_type = {'type': 'made_up_type',
                                     'file_to_merge': 'some_file',
                                     'merge_target_file':
                                     'some_other_file'}
+
def tearDown(self):
    """No extra cleanup beyond the base test case."""
    super(LibvirtVolumeSnapshotTestCase, self).tearDown()
+
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
            'refresh_connection_info')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
            'get_by_volume_id')
def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                        mock_refresh_connection_info):
    """The BDM is looked up by volume id and its connection info
    refreshed via the volume API."""
    fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
        'id': 123,
        'instance_uuid': 'fake-instance',
        'device_name': '/dev/sdb',
        'source_type': 'volume',
        'destination_type': 'volume',
        'volume_id': 'fake-volume-id-1',
        'connection_info': '{"fake": "connection_info"}'})
    mock_get_by_volume_id.return_value = fake_bdm

    self.conn._volume_refresh_connection_info(self.c, self.inst,
                                              self.volume_uuid)

    mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
    mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
        self.conn._volume_api, self.conn)
+
def test_volume_snapshot_create(self, quiesce=True):
    """Test snapshot creation with file-based disk.

    :param quiesce: when False, the quiesced snapshot attempt fails
        (no qemu guest agent) and a non-quiesced retry is expected.
    """
    self.flags(instance_name_template='instance-%s')
    self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
    self.mox.StubOutWithMock(self.conn, '_volume_api')

    instance = objects.Instance(**self.inst)

    new_file = 'new-file'

    domain = FakeVirtDomain(fake_xml=self.dom_xml)
    self.mox.StubOutWithMock(domain, 'XMLDesc')
    self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
    domain.XMLDesc(0).AndReturn(self.dom_xml)

    # Only the target volume's disk is snapshotted; vdb is excluded.
    snap_xml_src = (
       '<domainsnapshot>\n'
       '  <disks>\n'
       '    <disk name="disk1_file" snapshot="external" type="file">\n'
       '      <source file="new-file"/>\n'
       '    </disk>\n'
       '    <disk name="vdb" snapshot="no"/>\n'
       '  </disks>\n'
       '</domainsnapshot>\n')

    # Older versions of libvirt may be missing these.
    libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
    libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

    snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                  libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                  libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

    snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE

    if quiesce:
        domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
    else:
        # Quiesced attempt fails; driver falls back to non-quiesced.
        domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
            AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
        domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

    self.mox.ReplayAll()

    self.conn._volume_snapshot_create(self.c, instance, domain,
                                      self.volume_uuid, new_file)

    self.mox.VerifyAll()
+
+ def test_volume_snapshot_create_libgfapi(self, quiesce=True):
+ """Test snapshot creation with libgfapi network disk."""
+ self.flags(instance_name_template = 'instance-%s')
+ self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+
+ self.dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='disk1_file'/>
+ <target dev='vda' bus='virtio'/>
+ <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
+ </disk>
+ <disk type='block'>
+ <source protocol='gluster' name='gluster1/volume-1234'>
+ <host name='127.3.4.5' port='24007'/>
+ </source>
+ <target dev='vdb' bus='virtio' serial='1234'/>
+ </disk>
+ </devices>
+ </domain>"""
+
+ instance = objects.Instance(**self.inst)
+
+ new_file = 'new-file'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ snap_xml_src = (
+ '<domainsnapshot>\n'
+ ' <disks>\n'
+ ' <disk name="disk1_file" snapshot="external" type="file">\n'
+ ' <source file="new-file"/>\n'
+ ' </disk>\n'
+ ' <disk name="vdb" snapshot="no"/>\n'
+ ' </disks>\n'
+ '</domainsnapshot>\n')
+
+ # Older versions of libvirt may be missing these.
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
+
+ snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
+
+ snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
+
+ if quiesce:
+ domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
+ else:
+ domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
+ AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
+ domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_create(self.c, instance, domain,
+ self.volume_uuid, new_file)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_create_noquiesce(self):
+ self.test_volume_snapshot_create(quiesce=False)
+
+ def test_volume_snapshot_create_outer_success(self):
+ instance = objects.Instance(**self.inst)
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
+
+ self.conn._lookup_by_name('instance-1').AndReturn(domain)
+
+ self.conn._volume_snapshot_create(self.c,
+ instance,
+ domain,
+ self.volume_uuid,
+ self.create_info['new_file'])
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, self.create_info['snapshot_id'], 'creating')
+
+ self.mox.StubOutWithMock(self.conn._volume_api, 'get_snapshot')
+ self.conn._volume_api.get_snapshot(self.c,
+ self.create_info['snapshot_id']).AndReturn({'status': 'available'})
+ self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
+ self.conn._volume_refresh_connection_info(self.c, instance,
+ self.volume_uuid)
+
+ self.mox.ReplayAll()
+
+ self.conn.volume_snapshot_create(self.c, instance, self.volume_uuid,
+ self.create_info)
+
+ def test_volume_snapshot_create_outer_failure(self):
+ instance = objects.Instance(**self.inst)
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
+
+ self.conn._lookup_by_name('instance-1').AndReturn(domain)
+
+ self.conn._volume_snapshot_create(self.c,
+ instance,
+ domain,
+ self.volume_uuid,
+ self.create_info['new_file']).\
+ AndRaise(exception.NovaException('oops'))
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, self.create_info['snapshot_id'], 'error')
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.conn.volume_snapshot_create,
+ self.c,
+ instance,
+ self.volume_uuid,
+ self.create_info)
+
+ def test_volume_snapshot_delete_1(self):
+ """Deleting newest snapshot -- blockRebase."""
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockRebase('vda', 'snap.img', 0, 0)
+
+ domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_2(self):
+ """Deleting older snapshot -- blockCommit."""
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, 0)
+
+ domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vda', 0).AndReturn({})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_2)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_outer_success(self):
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
+
+ self.conn._volume_snapshot_delete(self.c,
+ instance,
+ self.volume_uuid,
+ snapshot_id,
+ delete_info=self.delete_info_1)
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, snapshot_id, 'deleting')
+
+ self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
+ self.conn._volume_refresh_connection_info(self.c, instance,
+ self.volume_uuid)
+
+ self.mox.ReplayAll()
+
+ self.conn.volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id,
+ self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_outer_failure(self):
+ instance = objects.Instance(**self.inst)
+ snapshot_id = '1234-9876'
+
+ FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
+
+ self.conn._volume_snapshot_delete(self.c,
+ instance,
+ self.volume_uuid,
+ snapshot_id,
+ delete_info=self.delete_info_1).\
+ AndRaise(exception.NovaException('oops'))
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, snapshot_id, 'error_deleting')
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.conn.volume_snapshot_delete,
+ self.c,
+ instance,
+ self.volume_uuid,
+ snapshot_id,
+ self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_invalid_type(self):
+ instance = objects.Instance(**self.inst)
+
+ FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, self.snapshot_id, 'error_deleting')
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.conn.volume_snapshot_delete,
+ self.c,
+ instance,
+ self.volume_uuid,
+ self.snapshot_id,
+ self.delete_info_invalid_type)
+
+ def test_volume_snapshot_delete_netdisk_1(self):
+ """Delete newest snapshot -- blockRebase for libgfapi/network disk."""
+
+ class FakeNetdiskDomain(FakeVirtDomain):
+ def __init__(self, *args, **kwargs):
+ super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
+
+ def XMLDesc(self, *args):
+ return self.dom_netdisk_xml
+
+ # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockRebase('vdb', 'vdb[1]', 0, 0)
+
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_netdisk_2(self):
+ """Delete older snapshot -- blockCommit for libgfapi/network disk."""
+
+ class FakeNetdiskDomain(FakeVirtDomain):
+ def __init__(self, *args, **kwargs):
+ super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
+
+ def XMLDesc(self, *args):
+ return self.dom_netdisk_xml
+
+ # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
+ fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
+
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id,
+ self.delete_info_netdisk)
+
+ self.mox.VerifyAll()
diff --git a/nova/tests/unit/virt/libvirt/test_fakelibvirt.py b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
new file mode 100644
index 0000000000..7a6d020426
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
@@ -0,0 +1,386 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+
+from lxml import etree
+
+from nova.compute import arch
+import nova.tests.unit.virt.libvirt.fakelibvirt as libvirt
+
+
+def get_vm_xml(name="testname", uuid=None, source_type='file',
+ interface_type='bridge'):
+ uuid_tag = ''
+ if uuid:
+ uuid_tag = '<uuid>%s</uuid>' % (uuid,)
+
+ return '''<domain type='kvm'>
+ <name>%(name)s</name>
+%(uuid_tag)s
+ <memory>128000</memory>
+ <vcpu>1</vcpu>
+ <os>
+ <type>hvm</type>
+ <kernel>/somekernel</kernel>
+ <cmdline>root=/dev/sda</cmdline>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source %(source_type)s='/somefile'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <interface type='%(interface_type)s'>
+ <mac address='05:26:3e:31:28:1f'/>
+ <source %(interface_type)s='br100'/>
+ </interface>
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='5901' autoport='yes' keymap='en-us'/>
+ <graphics type='spice' port='5901' autoport='yes' keymap='en-us'/>
+ </devices>
+</domain>''' % {'name': name,
+ 'uuid_tag': uuid_tag,
+ 'source_type': source_type,
+ 'interface_type': interface_type}
+
+
+class FakeLibvirtTests(test.NoDBTestCase):
+ def tearDown(self):
+ super(FakeLibvirtTests, self).tearDown()
+ libvirt._reset()
+
+ def get_openAuth_curry_func(self, readOnly=False):
+ def fake_cb(credlist):
+ return 0
+
+ creds = [[libvirt.VIR_CRED_AUTHNAME,
+ libvirt.VIR_CRED_NOECHOPROMPT],
+ fake_cb,
+ None]
+ flags = 0
+ if readOnly:
+ flags = libvirt.VIR_CONNECT_RO
+ return lambda uri: libvirt.openAuth(uri, creds, flags)
+
+ def test_openAuth_accepts_None_uri_by_default(self):
+ conn_method = self.get_openAuth_curry_func()
+ conn = conn_method(None)
+ self.assertNotEqual(conn, None, "Connecting to fake libvirt failed")
+
+ def test_openAuth_can_refuse_None_uri(self):
+ conn_method = self.get_openAuth_curry_func()
+ libvirt.allow_default_uri_connection = False
+ self.addCleanup(libvirt._reset)
+ self.assertRaises(ValueError, conn_method, None)
+
+ def test_openAuth_refuses_invalid_URI(self):
+ conn_method = self.get_openAuth_curry_func()
+ self.assertRaises(libvirt.libvirtError, conn_method, 'blah')
+
+ def test_getInfo(self):
+ conn_method = self.get_openAuth_curry_func(readOnly=True)
+ res = conn_method(None).getInfo()
+ self.assertIn(res[0], (arch.I686, arch.X86_64))
+ self.assertTrue(1024 <= res[1] <= 16384,
+ "Memory unusually high or low.")
+ self.assertTrue(1 <= res[2] <= 32,
+ "Active CPU count unusually high or low.")
+ self.assertTrue(800 <= res[3] <= 4500,
+ "CPU speed unusually high or low.")
+ self.assertTrue(res[2] <= (res[5] * res[6]),
+ "More active CPUs than num_sockets*cores_per_socket")
+
+ def test_createXML_detects_invalid_xml(self):
+ self._test_XML_func_detects_invalid_xml('createXML', [0])
+
+ def test_defineXML_detects_invalid_xml(self):
+ self._test_XML_func_detects_invalid_xml('defineXML', [])
+
+ def _test_XML_func_detects_invalid_xml(self, xmlfunc_name, args):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ try:
+ getattr(conn, xmlfunc_name)("this is not valid </xml>", *args)
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_XML_DETAIL)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_DOMAIN)
+ return
+ raise self.failureException("Invalid XML didn't raise libvirtError")
+
+ def test_defineXML_defines_domain(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ self.assertEqual('testname', dom.name())
+ self.assertEqual(0, dom.isActive())
+ dom.undefine()
+ self.assertRaises(libvirt.libvirtError,
+ conn.lookupByName,
+ 'testname')
+
+ def test_blockStats(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ blockstats = dom.blockStats('vda')
+ self.assertEqual(len(blockstats), 5)
+ for x in blockstats:
+ self.assertIn(type(x), [int, long])
+
+ def test_attach_detach(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ xml = '''<disk type='block'>
+ <driver name='qemu' type='raw'/>
+ <source dev='/dev/nbd0'/>
+ <target dev='/dev/vdc' bus='virtio'/>
+ </disk>'''
+ self.assertTrue(dom.attachDevice(xml))
+ self.assertTrue(dom.detachDevice(xml))
+
+ def test_info(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ info = dom.info()
+ self.assertEqual(info[0], libvirt.VIR_DOMAIN_RUNNING)
+ self.assertEqual(info[1], 128000)
+ self.assertTrue(info[2] <= 128000)
+ self.assertEqual(info[3], 1)
+ self.assertIn(type(info[4]), [int, long])
+
+ def test_createXML_runs_domain(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ self.assertEqual('testname', dom.name())
+ self.assertEqual(1, dom.isActive())
+ dom.destroy()
+ try:
+ dom = conn.lookupByName('testname')
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
+ return
+ self.fail("lookupByName succeeded for destroyed non-defined VM")
+
+ def test_defineXML_remembers_uuid(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ uuid = 'b21f957d-a72f-4b93-b5a5-45b1161abb02'
+ conn.defineXML(get_vm_xml(uuid=uuid))
+ dom = conn.lookupByName('testname')
+ self.assertEqual(dom.UUIDString(), uuid)
+
+ def test_createWithFlags(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ self.assertFalse(dom.isActive(), 'Defined domain was running.')
+ dom.createWithFlags(0)
+ self.assertTrue(dom.isActive(),
+ 'Domain wasn\'t running after createWithFlags')
+
+ def test_managedSave(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ self.assertFalse(dom.isActive(), 'Defined domain was running.')
+ dom.createWithFlags(0)
+ self.assertEqual(dom.hasManagedSaveImage(0), 0)
+ dom.managedSave(0)
+ self.assertEqual(dom.hasManagedSaveImage(0), 1)
+ dom.managedSaveRemove(0)
+ self.assertEqual(dom.hasManagedSaveImage(0), 0)
+
+ def test_listDomainsId_and_lookupById(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ dom.createWithFlags(0)
+ self.assertEqual(len(conn.listDomainsID()), 1)
+
+ dom_id = conn.listDomainsID()[0]
+ self.assertEqual(conn.lookupByID(dom_id), dom)
+
+ dom_id = conn.listDomainsID()[0]
+ try:
+ conn.lookupByID(dom_id + 1)
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
+ return
+ raise self.failureException("Looking up an invalid domain ID didn't "
+ "raise libvirtError")
+
+ def test_define_and_retrieve(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ xml = dom.XMLDesc(0)
+ etree.fromstring(xml)
+
+ def _test_accepts_source_type(self, source_type):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml(source_type=source_type))
+ dom = conn.lookupByName('testname')
+ xml = dom.XMLDesc(0)
+ tree = etree.fromstring(xml)
+ elem = tree.find('./devices/disk/source')
+ self.assertEqual(elem.get('file'), '/somefile')
+
+ def test_accepts_source_dev(self):
+ self._test_accepts_source_type('dev')
+
+ def test_accepts_source_path(self):
+ self._test_accepts_source_type('path')
+
+ def test_network_type_bridge_sticks(self):
+ self._test_network_type_sticks('bridge')
+
+ def test_network_type_network_sticks(self):
+ self._test_network_type_sticks('network')
+
+ def _test_network_type_sticks(self, network_type):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml(interface_type=network_type))
+ dom = conn.lookupByName('testname')
+ xml = dom.XMLDesc(0)
+ tree = etree.fromstring(xml)
+ elem = tree.find('./devices/interface')
+ self.assertEqual(elem.get('type'), network_type)
+ elem = elem.find('./source')
+ self.assertEqual(elem.get(network_type), 'br100')
+
+ def test_getType(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.getType(), 'QEMU')
+
+ def test_getVersion(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertIsInstance(conn.getVersion(), int)
+
+ def test_getCapabilities(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ etree.fromstring(conn.getCapabilities())
+
+ def test_nwfilter_define_undefine(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ # Will raise an exception if it's not valid XML
+ xml = '''<filter name='nova-instance-instance-789' chain='root'>
+ <uuid>946878c6-3ad3-82b2-87f3-c709f3807f58</uuid>
+ </filter>'''
+
+ conn.nwfilterDefineXML(xml)
+ nwfilter = conn.nwfilterLookupByName('nova-instance-instance-789')
+ nwfilter.undefine()
+ try:
+ conn.nwfilterLookupByName('nova-instance-instance-789320334')
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_NWFILTER)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_NWFILTER)
+ return
+ raise self.failureException("Invalid NWFilter name didn't"
+ " raise libvirtError")
+
+ def test_compareCPU_compatible(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ libvirt.node_cpu_model,
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_IDENTICAL)
+
+ def test_compareCPU_incompatible_vendor(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ libvirt.node_cpu_model,
+ "AnotherVendor",
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
+
+ def test_compareCPU_incompatible_arch(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % ('not-a-valid-arch',
+ libvirt.node_cpu_model,
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
+
+ def test_compareCPU_incompatible_model(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ "AnotherModel",
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
+
+ def test_compareCPU_compatible_unspecified_model(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_IDENTICAL)
diff --git a/nova/tests/unit/virt/libvirt/test_firewall.py b/nova/tests/unit/virt/libvirt/test_firewall.py
new file mode 100644
index 0000000000..b6d4cddf51
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_firewall.py
@@ -0,0 +1,749 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import threading
+import uuid
+from xml.dom import minidom
+
+from lxml import etree
+import mock
+import mox
+from oslo.concurrency import lockutils
+
+from nova.compute import utils as compute_utils
+from nova import exception
+from nova.network import linux_net
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova.virt.libvirt import firewall
+from nova.virt import netutils
+from nova.virt import virtapi
+
+try:
+ import libvirt
+except ImportError:
+ libvirt = fakelibvirt
+
+_fake_network_info = fake_network.fake_get_instance_nw_info
+_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
+_ipv4_like = fake_network.ipv4_like
+
+
+class NWFilterFakes:
+ def __init__(self):
+ self.filters = {}
+
+ def nwfilterLookupByName(self, name):
+ if name in self.filters:
+ return self.filters[name]
+ raise libvirt.libvirtError('Filter Not Found')
+
+ def filterDefineXMLMock(self, xml):
+ class FakeNWFilterInternal:
+ def __init__(self, parent, name, u, xml):
+ self.name = name
+ self.uuid = u
+ self.parent = parent
+ self.xml = xml
+
+ def XMLDesc(self, flags):
+ return self.xml
+
+ def undefine(self):
+ del self.parent.filters[self.name]
+
+ tree = etree.fromstring(xml)
+ name = tree.get('name')
+ u = tree.find('uuid')
+ if u is None:
+ u = uuid.uuid4().hex
+ else:
+ u = u.text
+ if name not in self.filters:
+ self.filters[name] = FakeNWFilterInternal(self, name, u, xml)
+ else:
+ if self.filters[name].uuid != u:
+ raise libvirt.libvirtError(
+ "Mismatching name '%s' with uuid '%s' vs '%s'"
+ % (name, self.filters[name].uuid, u))
+ self.filters[name].xml = xml
+ return True
+
+
+class FakeVirtAPI(virtapi.VirtAPI):
+ def provider_fw_rule_get_all(self, context):
+ return []
+
+
+class IptablesFirewallTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(IptablesFirewallTestCase, self).setUp()
+
+ class FakeLibvirtDriver(object):
+ def nwfilterDefineXML(*args, **kwargs):
+ """setup_basic_rules in nwfilter calls this."""
+ pass
+
+ self.fake_libvirt_connection = FakeLibvirtDriver()
+ self.fw = firewall.IptablesFirewallDriver(
+ FakeVirtAPI(),
+ get_connection=lambda: self.fake_libvirt_connection)
+
+ in_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ '# Completed on Tue Dec 18 15:50:25 2012',
+ '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
+ '*mangle',
+ ':PREROUTING ACCEPT [241:39722]',
+ ':INPUT ACCEPT [230:39282]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [266:26558]',
+ ':POSTROUTING ACCEPT [267:26590]',
+ '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
+ '--checksum-fill',
+ 'COMMIT',
+ '# Completed on Tue Dec 18 15:50:25 2012',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ in6_filter_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011',
+ ]
+
+ def _create_instance_ref(self,
+ uuid="74526555-9166-4893-a203-126bdcab0d67"):
+ inst = objects.Instance(
+ id=7,
+ uuid=uuid,
+ user_id="fake",
+ project_id="fake",
+ image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ instance_type_id=1)
+ inst.info_cache = objects.InstanceInfoCache()
+ inst.info_cache.deleted = False
+ return inst
+
    @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupRuleList,
                       "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
    @mock.patch.object(lockutils, "external_lock")
    def test_static_filters(self, mock_lock, mock_secgroup,
                            mock_secrule, mock_instlist):
        """End-to-end check of iptables rule generation for security groups.

        Builds a target instance plus a "source" instance whose security
        group grants access, defines five rules (ICMP by CIDR, ICMP echo
        request by CIDR, TCP 80-81 by CIDR, TCP 80-81 by source group, and
        a protocol-less rule by source group), then runs
        prepare/apply_instance_filter with iptables execution faked out and
        inspects the captured iptables-restore input in self.out_rules.
        """
        # external_lock is patched; hand back a plain semaphore so the
        # driver's locking still works in-process.
        mock_lock.return_value = threading.Semaphore()

        UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
        SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
        instance_ref = self._create_instance_ref(UUID)
        src_instance_ref = self._create_instance_ref(SRC_UUID)

        secgroup = objects.SecurityGroup(id=1,
                                         user_id='fake',
                                         project_id='fake',
                                         name='testgroup',
                                         description='test group')

        src_secgroup = objects.SecurityGroup(id=2,
                                             user_id='fake',
                                             project_id='fake',
                                             name='testsourcegroup',
                                             description='src group')

        # r1: ICMP, any type, from a CIDR.
        r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='icmp',
                                       from_port=-1,
                                       to_port=-1,
                                       cidr='192.168.11.0/24',
                                       grantee_group=None)

        # r2: ICMP echo request (type 8) from a CIDR.
        r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='icmp',
                                       from_port=8,
                                       to_port=-1,
                                       cidr='192.168.11.0/24',
                                       grantee_group=None)

        # r3: TCP ports 80-81 from a CIDR.
        r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='tcp',
                                       from_port=80,
                                       to_port=81,
                                       cidr='192.168.10.0/24',
                                       grantee_group=None)

        # r4: TCP ports 80-81 granted via the source security group.
        r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='tcp',
                                       from_port=80,
                                       to_port=81,
                                       cidr=None,
                                       grantee_group=src_secgroup,
                                       group_id=src_secgroup['id'])

        # r5: protocol-less rule granted via the source security group.
        r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol=None,
                                       cidr=None,
                                       grantee_group=src_secgroup,
                                       group_id=src_secgroup['id'])

        secgroup_list = objects.SecurityGroupList()
        secgroup_list.objects.append(secgroup)
        src_secgroup_list = objects.SecurityGroupList()
        src_secgroup_list.objects.append(src_secgroup)
        instance_ref.security_groups = secgroup_list
        src_instance_ref.security_groups = src_secgroup_list

        # Route security-group lookups to the right instance's groups.
        def _fake_secgroup(ctxt, instance):
            if instance.uuid == UUID:
                return instance_ref.security_groups
            else:
                return src_instance_ref.security_groups

        mock_secgroup.side_effect = _fake_secgroup

        # Only the target group has rules; the source group has none.
        def _fake_secrule(ctxt, id):
            if id == secgroup.id:
                rules = objects.SecurityGroupRuleList()
                rules.objects.extend([r1, r2, r3, r4, r5])
                return rules
            else:
                return []

        mock_secrule.side_effect = _fake_secrule

        # Membership lookup: the source group contains the source instance.
        def _fake_instlist(ctxt, id):
            if id == src_secgroup['id']:
                insts = objects.InstanceList()
                insts.objects.append(src_instance_ref)
                return insts
            else:
                insts = objects.InstanceList()
                insts.objects.append(instance_ref)
                return insts

        mock_instlist.side_effect = _fake_instlist

        # Fake the four iptables binaries: saves return the canned rule
        # dumps, restores capture their stdin so the generated rules can
        # be asserted on below.
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
            if cmd == ('ip6tables-save', '-c'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('iptables-save', '-c'):
                return '\n'.join(self.in_rules), None
            if cmd == ('iptables-restore', '-c'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('ip6tables-restore', '-c',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''

        network_model = _fake_network_info(self.stubs, 1)

        linux_net.iptables_manager.execute = fake_iptables_execute

        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)

        self.fw.prepare_instance_filter(instance_ref, network_model)
        self.fw.apply_instance_filter(instance_ref, network_model)

        # Pre-existing (non-nova) rules must survive the rewrite.
        # NOTE(review): len(filter(...)) below is a Python 2 idiom; on
        # Python 3 filter() returns an iterator — confirm target runtime.
        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_rules)
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        # r1: blanket ICMP accept from the CIDR.
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
                           '-s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        # r2: ICMP type-8 accept from the CIDR.
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
                           '--icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        # r4/r5: per-address rules granted via the source group, one pair
        # for every IPv4 fixed IP of the source instance's network.
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
                               '--dports 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
                               '%s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                            "Protocol/port-less acceptance rule wasn't added")

        # r3: TCP 80-81 accept from the CIDR.
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
                           '-m multiport --dports 80:81 -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")
+
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = _fake_network_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 1)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = _fake_network_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 0)
+
+ @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
+ @mock.patch.object(lockutils, "external_lock")
+ def test_multinic_iptables(self, mock_lock, mock_secgroup):
+ mock_lock.return_value = threading.Semaphore()
+ mock_secgroup.return_value = objects.SecurityGroupList()
+
+ ipv4_rules_per_addr = 1
+ ipv4_addr_per_network = 2
+ ipv6_rules_per_addr = 1
+ ipv6_addr_per_network = 1
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ network_info = _fake_network_info(self.stubs, networks_count,
+ ipv4_addr_per_network)
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ # Extra rules are for the DHCP request
+ rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+ networks_count) + 2
+ self.assertEqual(ipv4_network_rules, rules)
+ self.assertEqual(ipv6_network_rules,
+ ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
    @mock.patch.object(lockutils, "external_lock")
    def test_do_refresh_security_group_rules(self, mock_lock):
        """Refreshing security group rules re-runs the filter pipeline.

        Uses mox record/replay, so the expectation order below is
        significant: the first instance_rules/add_filters_for_instance
        pair is consumed by prepare_instance_filter(), the second pair
        (plus the has_chain check) by do_refresh_security_group_rules().
        """
        mock_lock.return_value = threading.Semaphore()
        instance_ref = self._create_instance_ref()
        self.mox.StubOutWithMock(self.fw,
                                 'instance_rules')
        self.mox.StubOutWithMock(self.fw,
                                 'add_filters_for_instance',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
                                 'has_chain')

        # Expected by prepare_instance_filter() below.
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg(), mox.IgnoreArg())
        # Expected by do_refresh_security_group_rules(); the refresh only
        # re-adds filters because has_chain() reports the chain exists.
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
                                                  ).AndReturn(True)
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()

        self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
        # Register the instance so the refresh path can find it.
        self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
        self.fw.do_refresh_security_group_rules("fake")
+
+ @mock.patch.object(lockutils, "external_lock")
+ def test_do_refresh_security_group_rules_instance_gone(self, mock_lock):
+ mock_lock.return_value = threading.Semaphore()
+ instance1 = {'id': 1, 'uuid': 'fake-uuid1'}
+ instance2 = {'id': 2, 'uuid': 'fake-uuid2'}
+ self.fw.instance_info = {1: (instance1, 'netinfo1'),
+ 2: (instance2, 'netinfo2')}
+ mock_filter = mock.MagicMock()
+ with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
+ mock_filter.has_chain.return_value = False
+ with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
+ mock_ir.return_value = (None, None)
+ self.fw.do_refresh_security_group_rules('secgroup')
+ self.assertEqual(2, mock_ir.call_count)
+ # NOTE(danms): Make sure that it is checking has_chain each time,
+ # continuing to process all the instances, and never adding the
+ # new chains back if has_chain() is False
+ mock_filter.has_chain.assert_has_calls([mock.call('inst-1'),
+ mock.call('inst-2')],
+ any_order=True)
+ self.assertEqual(0, mock_filter.add_chain.call_count)
+
+ @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
+ @mock.patch.object(objects.SecurityGroupRuleList,
+ "get_by_security_group_id")
+ @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
+ @mock.patch.object(lockutils, "external_lock")
+ def test_unfilter_instance_undefines_nwfilter(self, mock_lock,
+ mock_secgroup,
+ mock_secrule,
+ mock_instlist):
+ mock_lock.return_value = threading.Semaphore()
+
+ fakefilter = NWFilterFakes()
+ _xml_mock = fakefilter.filterDefineXMLMock
+ self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
+ _lookup_name = fakefilter.nwfilterLookupByName
+ self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
+ instance_ref = self._create_instance_ref()
+
+ mock_secgroup.return_value = objects.SecurityGroupList()
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance_ref, network_info)
+
+ # should undefine just the instance filter
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
    @mock.patch.object(FakeVirtAPI, "provider_fw_rule_get_all")
    @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
    @mock.patch.object(lockutils, "external_lock")
    def test_provider_firewall_rules(self, mock_lock, mock_secgroup,
                                     mock_fwrules):
        """Provider rules land in the 'provider' chain and track refreshes.

        Walks through 0 -> 1 -> 2 -> 1 provider rules via
        refresh_provider_fw_rules(), and verifies the instance chain gets
        exactly one jump rule into the provider chain.
        """
        mock_lock.return_value = threading.Semaphore()
        mock_secgroup.return_value = objects.SecurityGroupList()

        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        # create a firewall via setup_basic_filtering like libvirt_conn.spawn
        # should have a chain with 0 rules
        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        # add a rule and send the update message, check for 1 rule
        mock_fwrules.return_value = [{'protocol': 'tcp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535}]
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        mock_fwrules.return_value = [{'protocol': 'tcp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535},
                                     {'protocol': 'udp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535}]
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        mock_fwrules.return_value = [{'protocol': 'udp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535}]
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
+
+
class NWFilterTestCase(test.NoDBTestCase):
    """Tests for the libvirt NWFilterFirewall driver in isolation."""

    def setUp(self):
        super(NWFilterTestCase, self).setUp()

        # Bare stand-in for a libvirt connection; individual tests attach
        # the nwfilter* callables they need.
        class Mock(object):
            pass

        self.fake_libvirt_connection = Mock()

        self.fw = firewall.NWFilterFirewall(
            FakeVirtAPI(),
            lambda: self.fake_libvirt_connection)

    def _create_security_group(self, instance_ref):
        """Attach a single fake security group to instance_ref."""
        secgroup = objects.SecurityGroup(id=1,
                                         user_id='fake',
                                         project_id='fake',
                                         name='testgroup',
                                         description='test group description')

        secgroup_list = objects.SecurityGroupList()
        secgroup_list.objects.append(secgroup)
        instance_ref.security_groups = secgroup_list

        return secgroup

    def _create_instance(self):
        """Return a minimal Instance object with a non-deleted info cache."""
        inst = objects.Instance(
            id=7,
            uuid="74526555-9166-4893-a203-126bdcab0d67",
            user_id="fake",
            project_id="fake",
            image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            instance_type_id=1)
        inst.info_cache = objects.InstanceInfoCache()
        inst.info_cache.deleted = False
        return inst

    def test_creates_base_rule_first(self):
        """Filters are defined only after all filters they reference exist.

        The fake nwfilterDefineXML rejects any <filterref> to a filter
        that has not been defined yet, and records the transitive
        dependency set of each defined filter for later assertions.
        """
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = minidom.parseString(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(ref in self.defined_filters,
                                ('%s referenced filter that does ' +
                                'not yet exist: %s') % (name, ref))
                # Accumulate the referenced filter's own dependencies too.
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        def _ensure_all_called(mac, allow_dhcp):
            # Instance filter names embed the MAC with colons stripped.
            instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
                mac.translate({ord(':'): None}))
            requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
                            'no-mac-spoofing']
            required_not_list = []
            # allow-dhcp-server must appear iff a DHCP server is configured.
            if allow_dhcp:
                requiredlist.append('allow-dhcp-server')
            else:
                required_not_list.append('allow-dhcp-server')
            for required in requiredlist:
                self.assertTrue(required in
                                self.recursive_depends[instance_filter],
                                "Instance's filter does not include %s" %
                                required)
            for required_not in required_not_list:
                self.assertFalse(required_not in
                    self.recursive_depends[instance_filter],
                    "Instance filter includes %s" % required_not)

        network_info = _fake_network_info(self.stubs, 1)
        # since there is one (network_info) there is one vif
        # pass this vif's mac to _ensure_all_called()
        # to set the instance_filter properly
        mac = network_info[0]['address']
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        self.fw.setup_basic_filtering(instance_ref, network_info)
        allow_dhcp = True
        _ensure_all_called(mac, allow_dhcp)

        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
        self.fw.setup_basic_filtering(instance_ref, network_info)
        allow_dhcp = False
        _ensure_all_called(mac, allow_dhcp)

    def test_unfilter_instance_undefines_nwfilters(self):
        """unfilter_instance undefines exactly one filter (the instance's)."""
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)

    def test_redefining_nwfilters(self):
        """Re-running setup_basic_filtering must not raise on redefinition."""
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.fw.setup_basic_filtering(instance_ref, network_info)

    def test_nwfilter_parameters(self):
        """Every <parameter> in the instance filter carries the right value.

        Cross-checks IP/DHCPSERVER/RASERVER/PROJNET(6)/PROJMASK(6) against
        the fake network's v4 and v6 subnets; any unknown parameter name
        is an error.
        """
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)

        vif = network_info[0]
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self.fw._instance_filter_name(instance_ref,
                                                             nic_id)
        f = fakefilter.nwfilterLookupByName(instance_filter_name)
        tree = etree.fromstring(f.xml)

        for fref in tree.findall('filterref'):
            parameters = fref.findall('./parameter')
            for parameter in parameters:
                subnet_v4, subnet_v6 = vif['network']['subnets']
                if parameter.get('name') == 'IP':
                    self.assertTrue(_ipv4_like(parameter.get('value'),
                                                             '192.168'))
                elif parameter.get('name') == 'DHCPSERVER':
                    dhcp_server = subnet_v4.get('dhcp_server')
                    self.assertEqual(parameter.get('value'), dhcp_server)
                elif parameter.get('name') == 'RASERVER':
                    ra_server = subnet_v6['gateway']['address'] + "/128"
                    self.assertEqual(parameter.get('value'), ra_server)
                elif parameter.get('name') == 'PROJNET':
                    ipv4_cidr = subnet_v4['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK':
                    ipv4_cidr = subnet_v4['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), mask)
                elif parameter.get('name') == 'PROJNET6':
                    ipv6_cidr = subnet_v6['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK6':
                    ipv6_cidr = subnet_v6['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), prefix)
                else:
                    raise exception.InvalidParameterValue('unknown parameter '
                                                          'in filter')

    def test_multinic_base_filter_selection(self):
        """Each vif references nova-base or nova-nodhcp per its DHCP config."""
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        # Only the first network gets a DHCP server.
        network_info = _fake_network_info(self.stubs, 2)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'

        self.fw.setup_basic_filtering(instance_ref, network_info)

        def assert_filterref(instance, vif, expected=None):
            expected = expected or []
            nic_id = vif['address'].replace(':', '')
            filter_name = self.fw._instance_filter_name(instance, nic_id)
            f = fakefilter.nwfilterLookupByName(filter_name)
            tree = etree.fromstring(f.xml)
            frefs = [fr.get('filter') for fr in tree.findall('filterref')]
            self.assertEqual(set(expected), set(frefs))

        assert_filterref(instance_ref, network_info[0],
                         expected=['nova-base'])
        assert_filterref(instance_ref, network_info[1],
                         expected=['nova-nodhcp'])
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
new file mode 100644
index 0000000000..e865c165da
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -0,0 +1,1309 @@
+# Copyright 2012 Grid Dynamics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import inspect
+import os
+import shutil
+import tempfile
+
+import fixtures
+import mock
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.utils import units
+
+from nova import context
+from nova import exception
+from nova import keymgr
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import imageutils
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_processutils
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.virt import images
+from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
+
+CONF = cfg.CONF
+CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
+
+
class _ImageTestCase(object):
    """Shared fixture/test mixin for imagebackend image classes.

    Concrete subclasses set self.image_class (and SIZE) in their setUp
    before delegating here.
    """

    def mock_create_image(self, image):
        # Replace create_image with a pass-through that just invokes the
        # fetch function against the base path.
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image

    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        # Real temp dir: some backends stat/create paths under it.
        self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instances_path=self.INSTANCES_PATH)
        self.INSTANCE = {'name': 'instance',
                         'uuid': uuidutils.generate_uuid()}
        self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
                                           self.INSTANCE['uuid'], 'disk.info')
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.CONTEXT = context.get_admin_context()

        self.OLD_STYLE_INSTANCE_PATH = \
            fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)

        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def tearDown(self):
        super(_ImageTestCase, self).tearDown()
        # Remove the temp instances dir created in setUp.
        shutil.rmtree(self.INSTANCES_PATH)

    def test_prealloc_image(self):
        """fallocate runs once per cache() call after the capability probe."""
        CONF.set_override('preallocate_images', 'space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: True)

        # Call twice to verify testing fallocate is only called once.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(),
            ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])

    def test_prealloc_image_without_write_access(self):
        """No fallocate is attempted when the image path is not writable."""
        CONF.set_override('preallocate_images', 'space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(image, 'check_image_exists', lambda: True)
        self.stubs.Set(image, '_can_fallocate', lambda: True)
        self.stubs.Set(os.path, 'exists', lambda _: True)
        # os.access reports no write permission.
        self.stubs.Set(os, 'access', lambda p, w: False)

        # Testing fallocate is only called when user has write access.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Raw imagebackend (mox record/replay based)."""

    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Raw
        super(RawTestCase, self).setUp()
        # correct_format probes the file on disk; stub it out by default
        # (test_correct_format un-stubs it explicitly).
        self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)

    def prepare_mocks(self):
        """Stub the collaborators used by create_image; return a mock fetch
        function."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn

    def test_cache(self):
        """Nothing cached yet: base dir is created and the fetch runs."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_image_exists(self):
        """Image already present: no fetch, no directory creation."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_base_dir_exists(self):
        """Base dir exists: fetch runs but ensure_tree is not expected."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_template_exists(self):
        """Template already cached: create_image runs without fetching."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_create_image(self):
        """create_image fetches the template then copies it into place."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)

        self.mox.VerifyAll()

    def test_create_image_generated(self):
        """With no template/copy path, the fetch writes the image directly."""
        fn = self.prepare_mocks()
        fn(target=self.PATH)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)

        self.mox.VerifyAll()

    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    def test_create_image_extend(self, fake_qemu_img_info):
        """A size argument triggers disk.extend after the copy."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)

        self.mox.VerifyAll()

    def test_correct_format(self):
        """driver_format is resolved from qemu-img when no disk.info exists."""
        # Re-enable the correct_format stubbed out in setUp.
        self.stubs.UnsetAll()

        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')

        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        info = self.mox.CreateMockAnything()
        info.file_format = 'foo'
        imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
        os.path.exists(CONF.instances_path).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
        self.assertEqual(image.driver_format, 'foo')

        self.mox.VerifyAll()

    @mock.patch.object(images, 'qemu_img_info',
                       side_effect=exception.InvalidDiskInfo(
                           reason='invalid path'))
    def test_resolve_driver_format(self, fake_qemu_img_info):
        """An unreadable image falls back to the 'raw' driver format."""
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'raw')
+
+
+class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
+ SIZE = units.Gi
+
    def setUp(self):
        # image_class must be set before the shared _ImageTestCase.setUp
        # runs — presumably other fixtures key off it; TODO confirm.
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Resized-backing-file path: template path suffixed with size in GiB.
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / units.Gi))
+
    def prepare_mocks(self):
        """Stub the collaborators used by create_image; return a mock fetch
        function."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'create_cow_image')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
+
    def test_cache(self):
        """Nothing cached yet: the fetch function is invoked for the
        template."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(CONF.instances_path).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()
+
    def test_cache_image_exists(self):
        """Image and template both present: no fetch is performed."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()
+
    def test_cache_base_dir_exists(self):
        """Base dir exists but no image/template: the fetch still runs."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()
+
    def test_cache_template_exists(self):
        """Template already cached: create_image runs without fetching."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()
+
    def test_create_image(self):
        """create_image() with no size builds a plain CoW overlay."""
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)

        self.mox.VerifyAll()
+
    def test_create_image_with_size(self):
        """create_image() with a size extends the overlay after creation."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        # PATH is checked twice: once before creation, once before extend.
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        self.mox.VerifyAll()
+
    def test_create_image_too_small(self):
        """A requested size below the template's raises FlavorDiskTooSmall."""
        fn = self.prepare_mocks()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        # Template reports SIZE; the requested size of 1 below is smaller.
        imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
                                         ).AndReturn(self.SIZE)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(exception.FlavorDiskTooSmall,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        self.mox.VerifyAll()
+
    def test_generate_resized_backing_files(self):
        """A missing resized backing file is copied from the template and
        extended to the requested size."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        # NOTE(review): siblings use self.INSTANCES_PATH here; presumably
        # it equals CONF.instances_path -- confirm against _ImageTestCase.
        os.path.exists(CONF.instances_path).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)

        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(self.QCOW2_BASE)
        os.path.exists(self.QCOW2_BASE).AndReturn(False)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
                                              self.QCOW2_BASE)
        imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)

        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        self.mox.VerifyAll()
+
    def test_qcow2_exists_and_has_no_backing_file(self):
        """An existing image with no backing file is left untouched."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
        os.path.exists(self.INSTANCES_PATH).AndReturn(True)

        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)

        # No backing file: no copy/extend expectations are recorded.
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(None)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        self.mox.VerifyAll()
+
+ def test_resolve_driver_format(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'qcow2')
+
+
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the plain (unencrypted) LVM image backend."""

    VG = 'FakeVG'          # volume group the backend is configured with
    TEMPLATE_SIZE = 512    # size reported for the cached template
    SIZE = 1024            # requested instance disk size

    def setUp(self):
        self.image_class = imagebackend.Lvm
        super(LvmTestCase, self).setUp()
        self.flags(images_volume_group=self.VG, group='libvirt')
        # Encryption off: this class only covers the plain-LVM paths.
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        self.OLD_STYLE_INSTANCE_PATH = None
        self.PATH = os.path.join('/dev', self.VG, self.LV)
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.lvm = imagebackend.lvm

    def prepare_mocks(self):
        """Stub out the lvm/disk/utils helpers; return a mock fetch func."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(self.disk, 'resize2fs')
        self.mox.StubOutWithMock(self.lvm, 'create_volume')
        self.mox.StubOutWithMock(self.disk, 'get_disk_size')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn

    def _create_image(self, sparse):
        # No size requested: the LV is created at the template's size and
        # the template is converted into it with qemu-img.
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.TEMPLATE_SIZE,
                               sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)

        self.mox.VerifyAll()

    def _create_image_generated(self, sparse):
        # Generated (e.g. ephemeral) image: fn writes directly into the LV,
        # so no qemu-img conversion is expected.
        fn = self.prepare_mocks()
        self.lvm.create_volume(self.VG, self.LV,
                               self.SIZE, sparse=sparse)
        fn(target=self.PATH, ephemeral_size=None)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, ephemeral_size=None)

        self.mox.VerifyAll()

    def _create_image_resize(self, sparse):
        # Requested size exceeds the template: the filesystem must be
        # resized after conversion.
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG, self.LV,
                               self.SIZE, sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.disk.resize2fs(self.PATH, run_as_root=True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        self.mox.VerifyAll()

    def test_cache(self):
        """Nothing exists yet: the template dir is created and fn fetched."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)

        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_image_exists(self):
        """Everything exists: the fetch function (None) is never called."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_base_dir_exists(self):
        """Base dir present: ensure_tree is stubbed with no expectation,
        i.e. it must NOT be called."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_create_image(self):
        self._create_image(False)

    def test_create_image_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)

    def test_create_image_generated(self):
        self._create_image_generated(False)

    def test_create_image_generated_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)

    def test_create_image_resize(self):
        self._create_image_resize(False)

    def test_create_image_resize_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)

    def test_create_image_negative(self):
        """A failing create_volume propagates and the LV is cleaned up."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.SIZE,
                               sparse=False
                               ).AndRaise(RuntimeError())
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
        self.lvm.remove_volumes([self.PATH])
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)

        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()

    def test_create_image_generated_negative(self):
        """A failing generator function propagates and the LV is removed."""
        fn = self.prepare_mocks()
        fn(target=self.PATH,
           ephemeral_size=None).AndRaise(RuntimeError())
        self.lvm.create_volume(self.VG,
                               self.LV,
                               self.SIZE,
                               sparse=False)
        self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
        self.lvm.remove_volumes([self.PATH])
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)

        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE,
                          ephemeral_size=None)
        self.mox.VerifyAll()

    def test_prealloc_image(self):
        """preallocate_images=space must not shell out for LVM images."""
        # Use self.flags() rather than CONF.set_override() so the override
        # is cleaned up after the test (matches EncryptedLvmTestCase).
        self.flags(preallocate_images='space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)

        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the LVM backend with ephemeral storage encryption on.

    The image path is the dm-crypt mapping under /dev/mapper; the raw
    logical volume lives at LV_PATH and is opened through dmcrypt.
    """

    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024

    def setUp(self):
        super(EncryptedLvmTestCase, self).setUp()
        self.image_class = imagebackend.Lvm
        self.flags(enabled=True, group='ephemeral_storage_encryption')
        self.flags(cipher='aes-xts-plain64',
                   group='ephemeral_storage_encryption')
        self.flags(key_size=512, group='ephemeral_storage_encryption')
        # Deterministic key so the tests need no real key-manager backend.
        self.flags(fixed_key='00000000000000000000000000000000'
                             '00000000000000000000000000000000',
                   group='keymgr')
        self.flags(images_volume_group=self.VG, group='libvirt')
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        self.OLD_STYLE_INSTANCE_PATH = None
        self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
        # PATH is the decrypted dm-crypt device, not the raw LV.
        self.PATH = os.path.join('/dev/mapper',
                                 imagebackend.dmcrypt.volume_name(self.LV))
        self.key_manager = keymgr.API()
        self.INSTANCE['ephemeral_key_uuid'] =\
            self.key_manager.create_key(self.CONTEXT)
        self.KEY = self.key_manager.get_key(self.CONTEXT,
            self.INSTANCE['ephemeral_key_uuid']).get_encoded()

        self.lvm = imagebackend.lvm
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.libvirt_utils = imagebackend.libvirt_utils
        self.dmcrypt = imagebackend.dmcrypt

    def _create_image(self, sparse):
        """No size requested: LV at template size, dmcrypt opened,
        template converted onto the decrypted device."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()

            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
                               context=self.CONTEXT)

            fn.assert_called_with(context=self.CONTEXT,
                                  max_size=self.TEMPLATE_SIZE,
                                  target=self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(self.VG,
                                                      self.LV,
                                                      self.TEMPLATE_SIZE,
                                                      sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            cmd = ('qemu-img',
                   'convert',
                   '-O',
                   'raw',
                   self.TEMPLATE_PATH,
                   self.PATH)
            self.utils.execute.assert_called_with(*cmd, run_as_root=True)

    def _create_image_generated(self, sparse):
        """Generated image: fn writes straight onto the decrypted device."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()

            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH,
                               self.SIZE,
                               ephemeral_size=None,
                               context=self.CONTEXT)

            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            fn.assert_called_with(target=self.PATH,
                                  ephemeral_size=None, context=self.CONTEXT)

    def _create_image_resize(self, sparse):
        """Size larger than the template: resize2fs runs after conversion."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()

            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
                               context=self.CONTEXT)

            fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
                                  target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            cmd = ('qemu-img',
                   'convert',
                   '-O',
                   'raw',
                   self.TEMPLATE_PATH,
                   self.PATH)
            self.utils.execute.assert_called_with(*cmd, run_as_root=True)
            self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)

    def test_create_image(self):
        self._create_image(False)

    def test_create_image_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)

    def test_create_image_generated(self):
        self._create_image_generated(False)

    def test_create_image_generated_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)

    def test_create_image_resize(self):
        self._create_image_resize(False)

    def test_create_image_resize_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)

    def test_create_image_negative(self):
        """create_volume failure: error propagates, dm-crypt mapping and
        LV are both cleaned up."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            self.lvm.create_volume.side_effect = RuntimeError()

            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                context=self.CONTEXT)

            fn.assert_called_with(
                context=self.CONTEXT,
                max_size=self.SIZE,
                target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(
                self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_create_image_encrypt_negative(self):
        """dmcrypt.create_volume failure: error propagates and cleanup
        removes the mapping and the LV."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            self.dmcrypt.create_volume.side_effect = RuntimeError()

            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                context=self.CONTEXT)

            fn.assert_called_with(
                context=self.CONTEXT,
                max_size=self.SIZE,
                target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.dmcrypt.volume_name(self.LV),
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_create_image_generated_negative(self):
        """Generator fn failure: error propagates, mapping and LV removed."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            fn.side_effect = RuntimeError()

            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(RuntimeError,
                              image.create_image,
                              fn,
                              self.TEMPLATE_PATH,
                              self.SIZE,
                              ephemeral_size=None,
                              context=self.CONTEXT)

            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            fn.assert_called_with(
                target=self.PATH,
                ephemeral_size=None,
                context=self.CONTEXT)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_create_image_generated_encrypt_negative(self):
        """Failure during generation of an encrypted image: mapping and
        LV are cleaned up."""
        with contextlib.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'create_lvm_image',
                                  mock.Mock()),
                mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
                                  mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            fn.side_effect = RuntimeError()

            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                ephemeral_size=None,
                context=self.CONTEXT)

            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_prealloc_image(self):
        """preallocate_images=space must not shell out for LVM images."""
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)

        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the RBD (Ceph) image backend."""

    POOL = "FakePool"
    USER = "FakeUser"
    CONF = "FakeConf"   # ceph conf file path; shadows module CONF only as
                        # a class attribute, method bodies still see the
                        # module-level oslo CONF.
    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Rbd
        super(RbdTestCase, self).setUp()
        self.flags(images_rbd_pool=self.POOL,
                   rbd_user=self.USER,
                   images_rbd_ceph_conf=self.CONF,
                   group='libvirt')
        self.libvirt_utils = imagebackend.libvirt_utils
        self.utils = imagebackend.utils
        # The rbd/rados client libraries are not importable in the test
        # environment, so they are always stubbed.
        self.mox.StubOutWithMock(rbd_utils, 'rbd')
        self.mox.StubOutWithMock(rbd_utils, 'rados')

    def test_cache(self):
        """Nothing cached yet: template dir is created and fn fetched."""
        image = self.image_class(self.INSTANCE, self.NAME)

        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()

        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_base_dir_exists(self):
        """Base dir present: ensure_tree is stubbed with no recorded call,
        i.e. it must NOT run."""
        # NOTE: the original created a throwaway CreateMockAnything for fn
        # before this point; that dead store has been removed.
        image = self.image_class(self.INSTANCE, self.NAME)

        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()

        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_image_exists(self):
        """RBD image and template both exist: no fetch occurs."""
        image = self.image_class(self.INSTANCE, self.NAME)

        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_template_exists(self):
        """Template cached but RBD image missing: image is created."""
        image = self.image_class(self.INSTANCE, self.NAME)

        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_create_image(self):
        """create_image() imports the template via 'rbd import'."""
        fn = self.mox.CreateMockAnything()
        fn(max_size=None, target=self.TEMPLATE_PATH)

        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(False)
        image.check_image_exists().AndReturn(False)
        self.mox.ReplayAll()

        image.create_image(fn, self.TEMPLATE_PATH, None)

        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--new-format', '--id', self.USER,
               '--conf', self.CONF)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        self.mox.VerifyAll()

    def test_create_image_resize(self):
        """A larger requested size triggers a driver resize after import."""
        fn = self.mox.CreateMockAnything()
        full_size = self.SIZE * 2
        fn(max_size=full_size, target=self.TEMPLATE_PATH)

        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(False)
        image.check_image_exists().AndReturn(False)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--new-format', '--id', self.USER,
               '--conf', self.CONF)
        self.mox.StubOutWithMock(image, 'get_disk_size')
        image.get_disk_size(rbd_name).AndReturn(self.SIZE)
        self.mox.StubOutWithMock(image.driver, 'resize')
        image.driver.resize(rbd_name, full_size)

        self.mox.ReplayAll()

        image.create_image(fn, self.TEMPLATE_PATH, full_size)

        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        self.mox.VerifyAll()

    def test_create_image_already_exists(self):
        """An existing RBD image of the right size is left untouched."""
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(image, 'check_image_exists')
        image.check_image_exists().AndReturn(True)
        self.mox.StubOutWithMock(image, 'get_disk_size')
        image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
        image.check_image_exists().AndReturn(True)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        image.get_disk_size(rbd_name).AndReturn(self.SIZE)

        self.mox.ReplayAll()

        fn = self.mox.CreateMockAnything()
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)

        self.mox.VerifyAll()

    def test_prealloc_image(self):
        """preallocate_images=space must not shell out for RBD images."""
        # Use self.flags() rather than CONF.set_override() so the override
        # is cleaned up after the test (matches EncryptedLvmTestCase).
        self.flags(preallocate_images='space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        # NOTE: the original also defined an unused fake_resize() helper
        # here; it was never installed anywhere and has been removed.
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)

        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(), [])

    def test_parent_compatible(self):
        """Rbd.libvirt_info must keep the base Image signature."""
        self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
                         inspect.getargspec(self.image_class.libvirt_info))

    def test_image_path(self):
        """path is the rbd:pool/name:id=user:conf=file URI."""
        conf = "FakeConf"
        pool = "FakePool"
        user = "FakeUser"

        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=conf, group='libvirt')
        self.flags(rbd_user=user, group='libvirt')
        image = self.image_class(self.INSTANCE, self.NAME)
        rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
                                                user, conf)

        self.assertEqual(image.path, rbd_path)
+
+
class BackendTestCase(test.NoDBTestCase):
    """Tests that Backend maps image_type names to backend classes."""

    INSTANCE = {'name': 'fake-instance',
                'uuid': uuidutils.generate_uuid()}
    NAME = 'fake-name.suffix'

    def setUp(self):
        super(BackendTestCase, self).setUp()
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None

    def get_image(self, use_cow, image_type):
        """Return the image object Backend selects for image_type."""
        return imagebackend.Backend(use_cow).image(self.INSTANCE,
                                                   self.NAME,
                                                   image_type)

    def _test_image(self, image_type, image_not_cow, image_cow):
        # Resolve the backend class with CoW disabled and enabled.
        image1 = self.get_image(False, image_type)
        image2 = self.get_image(True, image_type)

        # Deliberately shadows the unittest helper of the same name within
        # this method; it only adds a more descriptive failure message and
        # delegates to self.assertIsInstance.
        def assertIsInstance(instance, class_object):
            failure = ('Expected %s,' +
                       ' but got %s.') % (class_object.__name__,
                                          instance.__class__.__name__)
            self.assertIsInstance(instance, class_object, msg=failure)

        assertIsInstance(image1, image_not_cow)
        assertIsInstance(image2, image_cow)

    def test_image_raw(self):
        self._test_image('raw', imagebackend.Raw, imagebackend.Raw)

    def test_image_qcow2(self):
        self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)

    def test_image_lvm(self):
        self.flags(images_volume_group='FakeVG', group='libvirt')
        self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)

    def test_image_rbd(self):
        conf = "FakeConf"
        pool = "FakePool"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=conf, group='libvirt')
        self.mox.StubOutWithMock(rbd_utils, 'rbd')
        self.mox.StubOutWithMock(rbd_utils, 'rados')
        self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)

    def test_image_default(self):
        # 'default' resolves to Raw without CoW and Qcow2 with CoW.
        self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
+
+
class UtilTestCase(test.NoDBTestCase):
    """Tests for module-level helpers in imagebackend."""

    def test_get_hw_disk_discard(self):
        # The two valid discard modes pass through unchanged.
        self.assertEqual('unmap', imagebackend.get_hw_disk_discard("unmap"))
        self.assertEqual('ignore', imagebackend.get_hw_disk_discard("ignore"))
        # None means "not configured" and is returned as-is.
        self.assertIsNone(imagebackend.get_hw_disk_discard(None))
        # Any other value is rejected.
        self.assertRaises(RuntimeError, imagebackend.get_hw_disk_discard,
                          "fake")
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
new file mode 100644
index 0000000000..d7bed2fcd0
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -0,0 +1,887 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import cStringIO
+import hashlib
+import os
+import time
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova import conductor
+from nova import db
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+from nova.virt.libvirt import imagecache
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+
+
@contextlib.contextmanager
def intercept_log_messages():
    """Capture everything logged under 'nova' into a StringIO buffer.

    Yields the stream so tests can assert on emitted log text; the
    handler is always detached again on exit.
    """
    # Set-up happens before the try block: in the original, a failure in
    # getLogger()/StreamHandler()/ContextFormatter() reached the finally
    # clause before 'handler' was bound, raising NameError instead of the
    # real error.
    mylog = logging.getLogger('nova')
    stream = cStringIO.StringIO()
    handler = logging.logging.StreamHandler(stream)
    handler.setFormatter(logging.ContextFormatter())
    mylog.logger.addHandler(handler)
    try:
        yield stream
    finally:
        mylog.logger.removeHandler(handler)
+
+
+class ImageCacheManagerTestCase(test.NoDBTestCase):
+
    def setUp(self):
        super(ImageCacheManagerTestCase, self).setUp()
        # Instance directory names used by the _list_backing_images tests;
        # covers both the 'instance-XXXXXXXX' format and a human-readable
        # display name ('banana-42-hamster').
        self.stock_instance_names = set(['instance-00000001',
                                         'instance-00000002',
                                         'instance-00000003',
                                         'banana-42-hamster'])
+
    def test_read_stored_checksum_missing(self):
        # No info file on disk means no checksum is returned.
        self.stubs.Set(os.path, 'exists', lambda x: False)
        csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
        self.assertIsNone(csum)

    def test_read_stored_checksum(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            # Stage a JSON info file and confirm the sha1 it contains is
            # what read_stored_checksum() hands back.
            csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
            fname = os.path.join(tmpdir, 'aaa')
            info_fname = imagecache.get_info_filename(fname)
            f = open(info_fname, 'w')
            f.write(csum_input)
            f.close()

            csum_output = imagecache.read_stored_checksum(fname,
                                                          timestamped=False)
            self.assertEqual(csum_input.rstrip(),
                             '{"sha1": "%s"}' % csum_output)

    def test_read_stored_checksum_legacy_essex(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            # Essex stored the checksum in a bare '<image>.sha1' file; it
            # should be read and then migrated to the newer '.info' file.
            fname = os.path.join(tmpdir, 'aaa')
            old_fname = fname + '.sha1'
            f = open(old_fname, 'w')
            f.write('fdghkfhkgjjksfdgjksjkghsdf')
            f.close()

            csum_output = imagecache.read_stored_checksum(fname,
                                                          timestamped=False)
            self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
            self.assertFalse(os.path.exists(old_fname))
            info_fname = imagecache.get_info_filename(fname)
            self.assertTrue(os.path.exists(info_fname))
+
    def test_list_base_images(self):
        # Directory entries which should NOT be treated as base images:
        # instance ids, an ephemeral disk, and an .info sidecar file.
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
                   '00000004']
        # Entries which should be: hashed originals plus their '_sm' and
        # resized ('_<bytes>') variants.
        images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                  'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                  'e97222e91fc4241f49a7f520d1dcf446751129b3',
                  '17d1b00b81642842e514494a78e804e9a511637c',
                  '17d1b00b81642842e514494a78e804e9a511637c_5368709120',
                  '17d1b00b81642842e514494a78e804e9a511637c_10737418240']
        listing.extend(images)

        self.stubs.Set(os, 'listdir', lambda x: listing)
        self.stubs.Set(os.path, 'isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        self.flags(instances_path='/var/lib/nova/instances')

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)

        # Every image entry should land in unexplained_images, fully
        # qualified with the base directory.
        sanitized = []
        for ent in image_cache_manager.unexplained_images:
            sanitized.append(ent.replace(base_dir + '/', ''))

        self.assertEqual(sorted(sanitized), sorted(images))

        expected = os.path.join(base_dir,
                                'e97222e91fc4241f49a7f520d1dcf446751129b3')
        self.assertIn(expected, image_cache_manager.unexplained_images)

        expected = os.path.join(base_dir,
                                '17d1b00b81642842e514494a78e804e9a511637c_'
                                '10737418240')
        self.assertIn(expected, image_cache_manager.unexplained_images)

        unexpected = os.path.join(base_dir, '00000004')
        self.assertNotIn(unexpected, image_cache_manager.unexplained_images)

        for ent in image_cache_manager.unexplained_images:
            self.assertTrue(ent.startswith(base_dir))

        # Only the un-suffixed hashed files count as originals.
        self.assertEqual(len(image_cache_manager.originals), 2)

        expected = os.path.join(base_dir,
                                '17d1b00b81642842e514494a78e804e9a511637c')
        self.assertIn(expected, image_cache_manager.originals)

        unexpected = os.path.join(base_dir,
                                  '17d1b00b81642842e514494a78e804e9a511637c_'
                                  '10737418240')
        self.assertNotIn(unexpected, image_cache_manager.originals)
+
+ def test_list_backing_images_small(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'instance-00000001',
+ 'instance-00000002', 'instance-00000003'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('instance-') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_resized(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'instance-00000001',
+ 'instance-00000002', 'instance-00000003'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('instance-') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
+ '10737418240'))
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_'
+ '10737418240')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_instancename(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'banana-42-hamster'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('banana-42-hamster') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
    def test_list_backing_images_disk_notexist(self):
        # If reading a disk's backing file fails, the error propagates
        # rather than being swallowed.
        self.stubs.Set(os, 'listdir',
                       lambda x: ['_base', 'banana-42-hamster'])
        self.stubs.Set(os.path, 'exists',
                       lambda x: x.find('banana-42-hamster') != -1)

        def fake_get_disk(disk_path):
            raise processutils.ProcessExecutionError()

        self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)

        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.unexplained_images = []
        image_cache_manager.instance_names = self.stock_instance_names

        self.assertRaises(processutils.ProcessExecutionError,
                          image_cache_manager._list_backing_images)
+
    def test_find_base_file_nothing(self):
        # No variant of the fingerprint exists at all.
        self.stubs.Set(os.path, 'exists', lambda x: False)

        base_dir = '/var/lib/nova/instances/_base'
        fingerprint = '549867354867'
        image_cache_manager = imagecache.ImageCacheManager()
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        self.assertEqual(0, len(res))

    def test_find_base_file_small(self):
        # Only the '<fingerprint>_sm' variant exists. Results are tuples of
        # (path, small, resized), per the assertions in these tests.
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        self.stubs.Set(os.path, 'exists',
                       lambda x: x.endswith('%s_sm' % fingerprint))

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file = os.path.join(base_dir, fingerprint + '_sm')
        self.assertEqual(res, [(base_file, True, False)])

    def test_find_base_file_resized(self):
        # Only a resized variant ('<fingerprint>_<bytes>') exists.
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
                   '00000004']

        self.stubs.Set(os, 'listdir', lambda x: listing)
        self.stubs.Set(os.path, 'exists',
                       lambda x: x.endswith('%s_10737418240' % fingerprint))
        self.stubs.Set(os.path, 'isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file = os.path.join(base_dir, fingerprint + '_10737418240')
        self.assertEqual(res, [(base_file, False, True)])

    def test_find_base_file_all(self):
        # Original, small and resized variants all present at once; all
        # three should be reported.
        fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
        listing = ['00000001',
                   'ephemeral_0_20_None',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
                   '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
                   '00000004']

        self.stubs.Set(os, 'listdir', lambda x: listing)
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.stubs.Set(os.path, 'isfile', lambda x: True)

        base_dir = '/var/lib/nova/instances/_base'
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager._list_base_images(base_dir)
        res = list(image_cache_manager._find_base_file(base_dir, fingerprint))

        base_file1 = os.path.join(base_dir, fingerprint)
        base_file2 = os.path.join(base_dir, fingerprint + '_sm')
        base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
        self.assertEqual(res, [(base_file1, False, False),
                               (base_file2, True, False),
                               (base_file3, False, True)])
+
+ @contextlib.contextmanager
+ def _make_base_file(self, checksum=True):
+ """Make a base file for testing."""
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+ fname = os.path.join(tmpdir, 'aaa')
+
+ base_file = open(fname, 'w')
+ base_file.write('data')
+ base_file.close()
+ base_file = open(fname, 'r')
+
+ if checksum:
+ imagecache.write_stored_checksum(fname)
+
+ base_file.close()
+ yield fname
+
    def test_remove_base_file(self):
        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager._remove_base_file(fname)
            info_fname = imagecache.get_info_filename(fname)

            # Files are initially too new to delete
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # Old files get cleaned up though (aged past one hour here)
            os.utime(fname, (-1, time.time() - 3601))
            image_cache_manager._remove_base_file(fname)

            self.assertFalse(os.path.exists(fname))
            self.assertFalse(os.path.exists(info_fname))

    def test_remove_base_file_original(self):
        with self._make_base_file() as fname:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.originals = [fname]
            image_cache_manager._remove_base_file(fname)
            info_fname = imagecache.get_info_filename(fname)

            # Files are initially too new to delete
            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # This file should stay longer than a resized image
            os.utime(fname, (-1, time.time() - 3601))
            image_cache_manager._remove_base_file(fname)

            self.assertTrue(os.path.exists(fname))
            self.assertTrue(os.path.exists(info_fname))

            # Originals don't stay forever though (gone after 25 hours)
            os.utime(fname, (-1, time.time() - 3600 * 25))
            image_cache_manager._remove_base_file(fname)

            self.assertFalse(os.path.exists(fname))
            self.assertFalse(os.path.exists(info_fname))

    def test_remove_base_file_dne(self):
        # This test is solely to execute the "does not exist" code path. We
        # don't expect the method being tested to do anything in this case.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            fname = os.path.join(tmpdir, 'aaa')
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager._remove_base_file(fname)

    def test_remove_base_file_oserror(self):
        with intercept_log_messages() as stream:
            with utils.tempdir() as tmpdir:
                self.flags(instances_path=tmpdir)
                self.flags(image_info_filename_pattern=('$instances_path/'
                                                        '%(image)s.info'),
                           group='libvirt')

                fname = os.path.join(tmpdir, 'aaa')

                os.mkdir(fname)
                os.utime(fname, (-1, time.time() - 3601))

                # This will raise an OSError because fname is a directory
                # (created just above) and os.remove() cannot delete
                # directories; the failure should be logged, not raised.
                image_cache_manager = imagecache.ImageCacheManager()
                image_cache_manager._remove_base_file(fname)

                self.assertTrue(os.path.exists(fname))
                self.assertNotEqual(stream.getvalue().find('Failed to remove'),
                                    -1)
+
    def test_handle_base_image_unused(self):
        # An aged base file with no users becomes removable.
        img = '123'

        with self._make_base_file() as fname:
            os.utime(fname, (-1, time.time() - 3601))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files,
                             [fname])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_used(self):
        # A base file in use by an instance is never removable.
        self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
        img = '123'

        with self._make_base_file() as fname:
            os.utime(fname, (-1, time.time() - 3601))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_used_remotely(self):
        # used_images tuples appear to be (local count, remote count,
        # instance names) -- TODO confirm against imagecache; a remotely
        # used file is likewise kept.
        self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
        img = '123'

        with self._make_base_file() as fname:
            os.utime(fname, (-1, time.time() - 3601))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_absent(self):
        # A used image with no base file on disk just logs a warning.
        img = '123'

        with intercept_log_messages() as stream:
            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, None)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])
            self.assertNotEqual(stream.getvalue().find('an absent base file'),
                                -1)

    def test_handle_base_image_used_missing(self):
        # The base file path is known but the file is missing on disk;
        # nothing should be flagged.
        img = '123'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')

            fname = os.path.join(tmpdir, 'aaa')

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files, [])

    def test_handle_base_image_checksum_fails(self):
        # A stored checksum which does not match the file contents marks
        # the base file corrupt (but not removable).
        self.flags(checksum_base_images=True, group='libvirt')
        self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)

        img = '123'

        with self._make_base_file() as fname:
            with open(fname, 'w') as f:
                f.write('banana')

            d = {'sha1': '21323454'}
            with open('%s.info' % fname, 'w') as f:
                f.write(jsonutils.dumps(d))

            image_cache_manager = imagecache.ImageCacheManager()
            image_cache_manager.unexplained_images = [fname]
            image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
            image_cache_manager._handle_base_image(img, fname)

            self.assertEqual(image_cache_manager.unexplained_images, [])
            self.assertEqual(image_cache_manager.removable_base_files, [])
            self.assertEqual(image_cache_manager.corrupt_base_files,
                             [fname])
+
    def test_verify_base_images(self):
        """End-to-end exercise of ImageCacheManager.update().

        Fakes the filesystem, the instance list and the backing-file
        lookups, then checks that update() classifies each cached base
        file as active or removable.
        """
        hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
        hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
        hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
        hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'

        self.flags(instances_path='/instance_path',
                   image_cache_subdirectory_name='_base')

        base_file_list = ['00000001',
                          'ephemeral_0_20_None',
                          'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                          'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                          hashed_42,
                          hashed_1,
                          hashed_21,
                          hashed_22,
                          '%s_5368709120' % hashed_1,
                          '%s_10737418240' % hashed_1,
                          '00000004']

        def fq_path(path):
            # Fully qualify a base-file name with the fake base directory.
            return os.path.join('/instance_path/_base/', path)

        # Fake base directory existence
        orig_exists = os.path.exists

        def exists(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_exists(path)

            if path in ['/instance_path',
                        '/instance_path/_base',
                        '/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk',
                        '/instance_path/instance-3/disk',
                        '/instance_path/_base/%s.info' % hashed_42]:
                return True

            for p in base_file_list:
                if path == fq_path(p):
                    return True
                if path == fq_path(p) + '.info':
                    return False

            if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
                                                                   hashed_21,
                                                                   hashed_22,
                                                                   hashed_42]]:
                return False

            self.fail('Unexpected path existence check: %s' % path)

        self.stubs.Set(os.path, 'exists', lambda x: exists(x))

        self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)

        # We need to stub utime as well
        self.stubs.Set(os, 'utime', lambda x, y: None)

        # Fake up some instances in the instances directory
        orig_listdir = os.listdir

        def listdir(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_listdir(path)

            if path == '/instance_path':
                return ['instance-1', 'instance-2', 'instance-3', '_base']

            if path == '/instance_path/_base':
                return base_file_list

            self.fail('Unexpected directory listed: %s' % path)

        self.stubs.Set(os, 'listdir', lambda x: listdir(x))

        # Fake isfile for these faked images in _base
        orig_isfile = os.path.isfile

        def isfile(path):
            # The python coverage tool got angry with my overly broad mocks
            if not path.startswith('/instance_path'):
                return orig_isfile(path)

            for p in base_file_list:
                if path == fq_path(p):
                    return True

            self.fail('Unexpected isfile call: %s' % path)

        self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))

        # Fake the database call which lists running instances
        instances = [{'image_ref': '1',
                      'host': CONF.host,
                      'name': 'instance-1',
                      'uuid': '123',
                      'vm_state': '',
                      'task_state': ''},
                     {'image_ref': '1',
                      'kernel_id': '21',
                      'ramdisk_id': '22',
                      'host': CONF.host,
                      'name': 'instance-2',
                      'uuid': '456',
                      'vm_state': '',
                      'task_state': ''}]
        all_instances = [fake_instance.fake_instance_obj(None, **instance)
                         for instance in instances]
        image_cache_manager = imagecache.ImageCacheManager()

        # Fake the utils call which finds the backing image
        def get_disk_backing_file(path):
            if path in ['/instance_path/instance-1/disk',
                        '/instance_path/instance-2/disk']:
                return fq_path('%s_5368709120' % hashed_1)
            self.fail('Unexpected backing file lookup: %s' % path)

        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                       lambda x: get_disk_backing_file(x))

        # Fake out verifying checksums, as that is tested elsewhere
        self.stubs.Set(image_cache_manager, '_verify_checksum',
                       lambda x, y: True)

        # Fake getmtime as well
        orig_getmtime = os.path.getmtime

        def getmtime(path):
            if not path.startswith('/instance_path'):
                return orig_getmtime(path)

            return 1000000

        self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))

        # Make sure we don't accidentally remove a real file
        orig_remove = os.remove

        def remove(path):
            if not path.startswith('/instance_path'):
                return orig_remove(path)

            # Don't try to remove fake files
            return

        self.stubs.Set(os, 'remove', lambda x: remove(x))

        # And finally we can make the call we're actually testing...
        # The argument here should be a context, but it is mocked out
        image_cache_manager.update(None, all_instances)

        # Verify: files referenced by the instances (and their kernel and
        # ramdisk ids) are active, everything else cached is removable.
        active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
                  fq_path(hashed_21), fq_path(hashed_22)]
        for act in active:
            self.assertIn(act, image_cache_manager.active_base_files)
        self.assertEqual(len(image_cache_manager.active_base_files),
                         len(active))

        for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
                    fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
                    fq_path(hashed_42),
                    fq_path('%s_10737418240' % hashed_1)]:
            self.assertIn(rem, image_cache_manager.removable_base_files)

        # Ensure there are no "corrupt" images as well
        self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
+
    def test_verify_base_images_no_base(self):
        # A missing base directory must be handled gracefully (no raise).
        self.flags(instances_path='/tmp/no/such/dir/name/please')
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.update(None, [])

    def test_is_valid_info_file(self):
        hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'

        self.flags(instances_path='/tmp/no/such/dir/name/please')
        self.flags(image_info_filename_pattern=('$instances_path/_base/'
                                                '%(image)s.info'),
                   group='libvirt')
        base_filename = os.path.join(CONF.instances_path, '_base', hashed)

        # Only '<hash>.info' paths matching the configured pattern are
        # valid; bare names, instance ids and legacy '.sha1' files are not.
        is_valid_info_file = imagecache.is_valid_info_file
        self.assertFalse(is_valid_info_file('banana'))
        self.assertFalse(is_valid_info_file(
            os.path.join(CONF.instances_path, '_base', '00000001')))
        self.assertFalse(is_valid_info_file(base_filename))
        self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
        self.assertTrue(is_valid_info_file(base_filename + '.info'))
+
+ def test_configured_checksum_path(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ # Ensure there is a base directory
+ os.mkdir(os.path.join(tmpdir, '_base'))
+
+ # Fake the database call which lists running instances
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = []
+ for instance in instances:
+ all_instances.append(fake_instance.fake_instance_obj(
+ None, **instance))
+
+ def touch(filename):
+ f = open(filename, 'w')
+ f.write('Touched')
+ f.close()
+
+ old = time.time() - (25 * 3600)
+ hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
+ base_filename = os.path.join(tmpdir, hashed)
+ touch(base_filename)
+ touch(base_filename + '.info')
+ os.utime(base_filename + '.info', (old, old))
+ touch(base_filename + '.info')
+ os.utime(base_filename + '.info', (old, old))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.update(None, all_instances)
+
+ self.assertTrue(os.path.exists(base_filename))
+ self.assertTrue(os.path.exists(base_filename + '.info'))
+
    def test_compute_manager(self):
        # Flag flipped by the fake DB call so we can assert it was reached.
        was = {'called': False}

        def fake_get_all_by_filters(context, *args, **kwargs):
            was['called'] = True
            instances = []
            # NOTE(review): xrange is Python 2 only; this file predates
            # Python 3 support.
            for x in xrange(2):
                instances.append(fake_instance.fake_db_instance(
                    image_ref='1',
                    uuid=x,
                    name=x,
                    vm_state='',
                    task_state=''))
            return instances

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            self.stubs.Set(db, 'instance_get_all_by_filters',
                           fake_get_all_by_filters)
            compute = importutils.import_object(CONF.compute_manager)
            self.flags(use_local=True, group='conductor')
            compute.conductor_api = conductor.API()
            # The image cache pass should query the DB for instances.
            compute._run_image_cache_manager_pass(None)
            self.assertTrue(was['called'])
+
+
class VerifyChecksumTestCase(test.NoDBTestCase):
    """Tests for ImageCacheManager._verify_checksum."""

    def setUp(self):
        super(VerifyChecksumTestCase, self).setUp()
        self.img = {'container_format': 'ami', 'id': '42'}
        self.flags(checksum_base_images=True, group='libvirt')

    def _make_checksum(self, tmpdir):
        # Write a small base file; return (fname, info_fname, testdata).
        testdata = ('OpenStack Software delivers a massively scalable cloud '
                    'operating system.')

        fname = os.path.join(tmpdir, 'aaa')
        info_fname = imagecache.get_info_filename(fname)

        with open(fname, 'w') as f:
            f.write(testdata)

        return fname, info_fname, testdata

    def _write_file(self, info_fname, info_attr, testdata):
        # Write an info file whose checksum is valid, invalid (not JSON),
        # or valid JSON with a wrong sha1, depending on info_attr.
        f = open(info_fname, 'w')
        if info_attr == "csum valid":
            csum = hashlib.sha1()
            csum.update(testdata)
            f.write('{"sha1": "%s"}\n' % csum.hexdigest())
        elif info_attr == "csum invalid, not json":
            f.write('banana')
        else:
            f.write('{"sha1": "banana"}')
        f.close()

    def _check_body(self, tmpdir, info_attr):
        # Common setup: point instances_path at tmpdir and stage a base
        # file plus its (possibly corrupt) info file.
        self.flags(instances_path=tmpdir)
        self.flags(image_info_filename_pattern=('$instances_path/'
                                                '%(image)s.info'),
                   group='libvirt')
        fname, info_fname, testdata = self._make_checksum(tmpdir)
        self._write_file(info_fname, info_attr, testdata)
        image_cache_manager = imagecache.ImageCacheManager()
        return image_cache_manager, fname

    def test_verify_checksum(self):
        with utils.tempdir() as tmpdir:
            image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
            res = image_cache_manager._verify_checksum(self.img, fname)
            self.assertTrue(res)

    def test_verify_checksum_disabled(self):
        # With checksumming disabled the check is skipped (returns None).
        self.flags(checksum_base_images=False, group='libvirt')
        with utils.tempdir() as tmpdir:
            image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
            res = image_cache_manager._verify_checksum(self.img, fname)
            self.assertIsNone(res)

    def test_verify_checksum_invalid_json(self):
        with intercept_log_messages() as stream:
            with utils.tempdir() as tmpdir:
                image_cache_manager, fname = (
                    self._check_body(tmpdir, "csum invalid, not json"))
                res = image_cache_manager._verify_checksum(
                    self.img, fname, create_if_missing=False)
                self.assertFalse(res)
                log = stream.getvalue()

                # NOTE(mikal): this is a skip not a fail because the file is
                # present, but is not in valid json format and therefore is
                # skipped.
                self.assertNotEqual(log.find('image verification skipped'), -1)

    def test_verify_checksum_invalid_repaired(self):
        # Unparsable stored data is regenerated when create_if_missing=True.
        with utils.tempdir() as tmpdir:
            image_cache_manager, fname = (
                self._check_body(tmpdir, "csum invalid, not json"))
            res = image_cache_manager._verify_checksum(
                self.img, fname, create_if_missing=True)
            self.assertIsNone(res)

    def test_verify_checksum_invalid(self):
        # Valid JSON but wrong sha1: a genuine verification failure.
        with intercept_log_messages() as stream:
            with utils.tempdir() as tmpdir:
                image_cache_manager, fname = (
                    self._check_body(tmpdir, "csum invalid, valid json"))
                res = image_cache_manager._verify_checksum(self.img, fname)
                self.assertFalse(res)
                log = stream.getvalue()
                self.assertNotEqual(log.find('image verification failed'), -1)

    def test_verify_checksum_file_missing(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            self.flags(image_info_filename_pattern=('$instances_path/'
                                                    '%(image)s.info'),
                       group='libvirt')
            fname, info_fname, testdata = self._make_checksum(tmpdir)

            image_cache_manager = imagecache.ImageCacheManager()
            res = image_cache_manager._verify_checksum('aaa', fname)
            self.assertIsNone(res)

            # Checksum requests for a file with no checksum now have the
            # side effect of creating the checksum
            self.assertTrue(os.path.exists(info_fname))
diff --git a/nova/tests/unit/virt/libvirt/test_lvm.py b/nova/tests/unit/virt/libvirt/test_lvm.py
new file mode 100644
index 0000000000..fdb3e4b9f6
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_lvm.py
@@ -0,0 +1,183 @@
+# Copyright 2012 NTT Data. All Rights Reserved.
+# Copyright 2012 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova.virt.libvirt import lvm
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+
+
+class LvmTestCase(test.NoDBTestCase):
+ def test_get_volume_size(self):
+ executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ executes.append(cmd)
+ return 123456789, None
+
+ expected_commands = [('blockdev', '--getsize64', '/dev/foo')]
+ self.stubs.Set(utils, 'execute', fake_execute)
+ size = lvm.get_volume_size('/dev/foo')
+ self.assertEqual(expected_commands, executes)
+ self.assertEqual(size, 123456789)
+
+ @mock.patch.object(utils, 'execute',
+ side_effect=processutils.ProcessExecutionError(
+ stderr=('blockdev: cannot open /dev/foo: '
+ 'No such device or address')))
+ def test_get_volume_size_not_found(self, mock_execute):
+ self.assertRaises(exception.VolumeBDMPathNotFound,
+ lvm.get_volume_size, '/dev/foo')
+
+ @mock.patch.object(utils, 'execute',
+ side_effect=processutils.ProcessExecutionError(
+ stderr='blockdev: i am sad in other ways'))
+    def test_get_volume_size_unexpected_error(self, mock_execute):
+ self.assertRaises(processutils.ProcessExecutionError,
+ lvm.get_volume_size, '/dev/foo')
+
+ def test_lvm_clear(self):
+ def fake_lvm_size(path):
+ return lvm_size
+
+ def fake_execute(*cmd, **kwargs):
+ executes.append(cmd)
+
+ self.stubs.Set(lvm, 'get_volume_size', fake_lvm_size)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ # Test the correct dd commands are run for various sizes
+ lvm_size = 1
+ executes = []
+ expected_commands = [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v1',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v1')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1024
+ executes = []
+ expected_commands = [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v2',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v2')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1025
+ executes = []
+ expected_commands = [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v3',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v3',
+ 'seek=1024', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v3')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1048576
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v4',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ lvm.clear_volume('/dev/v4')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1048577
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v5',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v5',
+ 'seek=1048576', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v5')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1234567
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v6',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ expected_commands += [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v6',
+ 'seek=1024', 'count=181', 'conv=fdatasync')]
+ expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v6',
+ 'seek=1233920', 'count=647', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v6')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear_size limits the size
+ lvm_size = 10485761
+ CONF.set_override('volume_clear_size', '1', 'libvirt')
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v7',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ lvm.clear_volume('/dev/v7')
+ self.assertEqual(expected_commands, executes)
+
+ CONF.set_override('volume_clear_size', '2', 'libvirt')
+ lvm_size = 1048576
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v9',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ lvm.clear_volume('/dev/v9')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear=shred
+ CONF.set_override('volume_clear', 'shred', 'libvirt')
+ CONF.set_override('volume_clear_size', '0', 'libvirt')
+ lvm_size = 1048576
+ executes = []
+ expected_commands = [('shred', '-n3', '-s1048576', '/dev/va')]
+ lvm.clear_volume('/dev/va')
+ self.assertEqual(expected_commands, executes)
+
+ CONF.set_override('volume_clear', 'shred', 'libvirt')
+ CONF.set_override('volume_clear_size', '1', 'libvirt')
+ lvm_size = 10485761
+ executes = []
+ expected_commands = [('shred', '-n3', '-s1048576', '/dev/vb')]
+ lvm.clear_volume('/dev/vb')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear=none does nothing
+ CONF.set_override('volume_clear', 'none', 'libvirt')
+ executes = []
+ expected_commands = []
+ lvm.clear_volume('/dev/vc')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear=invalid falls back to the default 'zero'
+ CONF.set_override('volume_clear', 'invalid', 'libvirt')
+ lvm_size = 1
+ executes = []
+ expected_commands = [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/vd',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/vd')
+ self.assertEqual(expected_commands, executes)
+
+ def test_fail_remove_all_logical_volumes(self):
+ def fake_execute(*args, **kwargs):
+ if 'vol2' in args:
+ raise processutils.ProcessExecutionError('Error')
+
+ with contextlib.nested(
+ mock.patch.object(lvm, 'clear_volume'),
+ mock.patch.object(libvirt_utils, 'execute',
+ side_effect=fake_execute)) as (mock_clear, mock_execute):
+ self.assertRaises(exception.VolumesNotRemoved,
+ lvm.remove_volumes,
+ ['vol1', 'vol2', 'vol3'])
+ self.assertEqual(3, mock_execute.call_count)
diff --git a/nova/tests/unit/virt/libvirt/test_rbd.py b/nova/tests/unit/virt/libvirt/test_rbd.py
new file mode 100644
index 0000000000..bcbdc25f59
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_rbd.py
@@ -0,0 +1,283 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova import test
+from nova import utils
+from nova.virt.libvirt import rbd_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+CEPH_MON_DUMP = """dumped monmap epoch 1
+{ "epoch": 1,
+ "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
+ "modified": "2013-05-22 17:44:56.343618",
+ "created": "2013-05-22 17:44:56.343618",
+ "mons": [
+ { "rank": 0,
+ "name": "a",
+ "addr": "[::1]:6789\/0"},
+ { "rank": 1,
+ "name": "b",
+ "addr": "[::1]:6790\/0"},
+ { "rank": 2,
+ "name": "c",
+ "addr": "[::1]:6791\/0"},
+ { "rank": 3,
+ "name": "d",
+ "addr": "127.0.0.1:6792\/0"},
+ { "rank": 4,
+ "name": "e",
+ "addr": "example.com:6791\/0"}],
+ "quorum": [
+ 0,
+ 1,
+ 2]}
+"""
+
+
+class RbdTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def setUp(self, mock_rados, mock_rbd):
+ super(RbdTestCase, self).setUp()
+
+ self.mock_rados = mock_rados
+ self.mock_rados.Rados = mock.Mock
+ self.mock_rados.Rados.ioctx = mock.Mock()
+ self.mock_rados.Rados.connect = mock.Mock()
+ self.mock_rados.Rados.shutdown = mock.Mock()
+ self.mock_rados.Rados.open_ioctx = mock.Mock()
+ self.mock_rados.Rados.open_ioctx.return_value = \
+ self.mock_rados.Rados.ioctx
+ self.mock_rados.Error = Exception
+
+ self.mock_rbd = mock_rbd
+ self.mock_rbd.RBD = mock.Mock
+ self.mock_rbd.Image = mock.Mock
+ self.mock_rbd.Image.close = mock.Mock()
+ self.mock_rbd.RBD.Error = Exception
+
+ self.rbd_pool = 'rbd'
+ self.driver = rbd_utils.RBDDriver(self.rbd_pool, None, None)
+
+ self.volume_name = u'volume-00000001'
+
+ def tearDown(self):
+ super(RbdTestCase, self).tearDown()
+
+ def test_good_locations(self):
+ locations = ['rbd://fsid/pool/image/snap',
+ 'rbd://%2F/%2F/%2F/%2F', ]
+ map(self.driver.parse_url, locations)
+
+ def test_bad_locations(self):
+ locations = ['rbd://image',
+ 'http://path/to/somewhere/else',
+ 'rbd://image/extra',
+ 'rbd://image/',
+ 'rbd://fsid/pool/image/',
+ 'rbd://fsid/pool/image/snap/',
+ 'rbd://///', ]
+ for loc in locations:
+ self.assertRaises(exception.ImageUnacceptable,
+ self.driver.parse_url, loc)
+ self.assertFalse(self.driver.is_cloneable({'url': loc},
+ {'disk_format': 'raw'}))
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+ info = {'disk_format': 'raw'}
+ self.assertTrue(self.driver.is_cloneable(location, info))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ def test_uncloneable_different_fsid(self, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://def/pool/image/snap'}
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': 'raw'}))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy,
+ mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+
+ mock_proxy.side_effect = mock_rbd.Error
+
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': 'raw'}))
+ mock_proxy.assert_called_once_with(self.driver, 'image', pool='pool',
+ snapshot='snap', read_only=True)
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ def test_uncloneable_bad_format(self, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+ formats = ['qcow2', 'vmdk', 'vdi']
+ for f in formats:
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': f}))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(utils, 'execute')
+ def test_get_mon_addrs(self, mock_execute):
+ mock_execute.return_value = (CEPH_MON_DUMP, '')
+ hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
+ ports = ['6789', '6790', '6791', '6792', '6791']
+ self.assertEqual((hosts, ports), self.driver.get_mon_addrs())
+
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_clone(self, mock_rados, mock_rbd, mock_client):
+ pool = u'images'
+ image = u'image-name'
+ snap = u'snapshot-name'
+ location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)}
+
+ client_stack = []
+
+ def mock__enter__(inst):
+ def _inner():
+ client_stack.append(inst)
+ return inst
+ return _inner
+
+ client = mock_client.return_value
+        # capture both rados clients used to perform the clone
+ client.__enter__.side_effect = mock__enter__(client)
+
+ rbd = mock_rbd.RBD.return_value
+
+ self.driver.clone(location, self.volume_name)
+
+ args = [client_stack[0].ioctx, str(image), str(snap),
+ client_stack[1].ioctx, str(self.volume_name)]
+ kwargs = {'features': mock_rbd.RBD_FEATURE_LAYERING}
+ rbd.clone.assert_called_once_with(*args, **kwargs)
+ self.assertEqual(client.__enter__.call_count, 2)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_resize(self, mock_proxy):
+ size = 1024
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.driver.resize(self.volume_name, size)
+ proxy.resize.assert_called_once_with(size)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
+ @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_rbd_volume_proxy_init(self, mock_rados, mock_rbd,
+ mock_connect_from_rados,
+ mock_disconnect_from_rados):
+ mock_connect_from_rados.return_value = (None, None)
+ mock_disconnect_from_rados.return_value = (None, None)
+
+ with rbd_utils.RBDVolumeProxy(self.driver, self.volume_name):
+ mock_connect_from_rados.assert_called_once_with(None)
+ self.assertFalse(mock_disconnect_from_rados.called)
+
+ mock_disconnect_from_rados.assert_called_once_with(None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_default(self, mock_rados, mock_rbd):
+ ret = self.driver._connect_to_rados()
+ self.assertTrue(self.mock_rados.Rados.connect.called)
+ self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
+ self.assertIsInstance(ret[0], self.mock_rados.Rados)
+ self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
+ self.mock_rados.Rados.open_ioctx.assert_called_with(self.rbd_pool)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd):
+ ret = self.driver._connect_to_rados('alt_pool')
+ self.assertTrue(self.mock_rados.Rados.connect.called)
+ self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
+ self.assertIsInstance(ret[0], self.mock_rados.Rados)
+ self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
+ self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool')
+
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_error(self, mock_rados):
+ mock_rados.Rados.open_ioctx.side_effect = mock_rados.Error
+ self.assertRaises(mock_rados.Error, self.driver._connect_to_rados)
+ mock_rados.Rados.open_ioctx.assert_called_once_with(self.rbd_pool)
+ mock_rados.Rados.shutdown.assert_called_once_with()
+
+ def test_ceph_args_none(self):
+ self.driver.rbd_user = None
+ self.driver.ceph_conf = None
+ self.assertEqual([], self.driver.ceph_args())
+
+ def test_ceph_args_rbd_user(self):
+ self.driver.rbd_user = 'foo'
+ self.driver.ceph_conf = None
+ self.assertEqual(['--id', 'foo'], self.driver.ceph_args())
+
+ def test_ceph_args_ceph_conf(self):
+ self.driver.rbd_user = None
+ self.driver.ceph_conf = '/path/bar.conf'
+ self.assertEqual(['--conf', '/path/bar.conf'],
+ self.driver.ceph_args())
+
+ def test_ceph_args_rbd_user_and_ceph_conf(self):
+ self.driver.rbd_user = 'foo'
+ self.driver.ceph_conf = '/path/bar.conf'
+ self.assertEqual(['--id', 'foo', '--conf', '/path/bar.conf'],
+ self.driver.ceph_args())
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_exists(self, mock_proxy):
+ snapshot = 'snap'
+ proxy = mock_proxy.return_value
+ self.assertTrue(self.driver.exists(self.volume_name,
+ self.rbd_pool,
+ snapshot))
+ proxy.__enter__.assert_called_once_with()
+ proxy.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd):
+ instance = {'uuid': '12345'}
+
+ rbd = mock_rbd.RBD.return_value
+ rbd.list.return_value = ['12345_test', '111_test']
+
+ client = mock_client.return_value
+ self.driver.cleanup_volumes(instance)
+ rbd.remove.assert_called_once_with(client.ioctx, '12345_test')
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
new file mode 100644
index 0000000000..4114c03516
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -0,0 +1,652 @@
+# Copyright 2012 NTT Data. All Rights Reserved.
+# Copyright 2012 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import os
+import tempfile
+
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.openstack.common import fileutils
+from nova import test
+from nova import utils
+from nova.virt.disk import api as disk
+from nova.virt import images
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+
+
+class LibvirtUtilsTestCase(test.NoDBTestCase):
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_get_disk_type(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+blah BLAH: bb
+"""
+ mock_execute.return_value = (example_output, '')
+ disk_type = libvirt_utils.get_disk_type(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('raw', disk_type)
+
+ @mock.patch('nova.utils.execute')
+ def test_copy_image_local_cp(self, mock_execute):
+ libvirt_utils.copy_image('src', 'dest')
+ mock_execute.assert_called_once_with('cp', 'src', 'dest')
+
+ _rsync_call = functools.partial(mock.call,
+ 'rsync', '--sparse', '--compress')
+
+ @mock.patch('nova.utils.execute')
+ def test_copy_image_rsync(self, mock_execute):
+ libvirt_utils.copy_image('src', 'dest', host='host')
+
+ mock_execute.assert_has_calls([
+ self._rsync_call('--dry-run', 'src', 'host:dest'),
+ self._rsync_call('src', 'host:dest'),
+ ])
+ self.assertEqual(2, mock_execute.call_count)
+
+ @mock.patch('nova.utils.execute')
+ def test_copy_image_scp(self, mock_execute):
+ mock_execute.side_effect = [
+ processutils.ProcessExecutionError,
+ mock.DEFAULT,
+ ]
+
+ libvirt_utils.copy_image('src', 'dest', host='host')
+
+ mock_execute.assert_has_calls([
+ self._rsync_call('--dry-run', 'src', 'host:dest'),
+ mock.call('scp', 'src', 'host:dest'),
+ ])
+ self.assertEqual(2, mock_execute.call_count)
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_disk_type(self, mock_exists):
+        # Paths under /dev are detected as LVM volumes
+        # (get_disk_type treats a /dev prefix as lvm)
+ for p in ['/dev/b', '/dev/blah/blah']:
+ d_type = libvirt_utils.get_disk_type(p)
+ self.assertEqual('lvm', d_type)
+
+ # Try rbd detection
+ d_type = libvirt_utils.get_disk_type('rbd:pool/instance')
+ self.assertEqual('rbd', d_type)
+
+ # Try the other types
+ template_output = """image: %(path)s
+file format: %(format)s
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ path = '/myhome/disk.config'
+ for f in ['raw', 'qcow2']:
+ output = template_output % ({
+ 'format': f,
+ 'path': path,
+ })
+ with mock.patch('nova.utils.execute',
+ return_value=(output, '')) as mock_execute:
+ d_type = libvirt_utils.get_disk_type(path)
+ mock_execute.assert_called_once_with(
+ 'env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ self.assertEqual(f, d_type)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_disk_backing(self, mock_execute, mock_exists):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: 2K (2048 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ output = template_output % ({
+ 'path': path,
+ })
+ mock_execute.return_value = (output, '')
+ d_backing = libvirt_utils.get_disk_backing_file(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertIsNone(d_backing)
+
+ def _test_disk_size(self, mock_execute, path, expected_size):
+ d_size = libvirt_utils.get_disk_size(path)
+ self.assertEqual(expected_size, d_size)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_disk_size(self, mock_exists):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: %(v_size)s (%(vsize_b)s bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ for i in range(0, 128):
+ bytes = i * 65336
+ kbytes = bytes / 1024
+ mbytes = kbytes / 1024
+ output = template_output % ({
+ 'v_size': "%sM" % (mbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ with mock.patch('nova.utils.execute',
+ return_value=(output, '')) as mock_execute:
+ self._test_disk_size(mock_execute, path, i)
+ output = template_output % ({
+ 'v_size': "%sK" % (kbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ with mock.patch('nova.utils.execute',
+ return_value=(output, '')) as mock_execute:
+ self._test_disk_size(mock_execute, path, i)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_canon(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+blah BLAH: bb
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+ self.assertEqual(65536, image_info.cluster_size)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_canon2(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: QCOW2
+virtual size: 67108844
+cluster_size: 65536
+disk size: 963434
+backing file: /var/lib/nova/a328c7998805951a_2
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('qcow2', image_info.file_format)
+ self.assertEqual(67108844, image_info.virtual_size)
+ self.assertEqual(963434, image_info.disk_size)
+ self.assertEqual(65536, image_info.cluster_size)
+ self.assertEqual('/var/lib/nova/a328c7998805951a_2',
+ image_info.backing_file)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_backing_file_actual(self,
+ mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+ self.assertEqual(1, len(image_info.snapshots))
+ self.assertEqual('/b/3a988059e51a_2',
+ image_info.backing_file)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_convert(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+junk stuff: bbb
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_snaps(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+ self.assertEqual(3, len(image_info.snapshots))
+
+ def test_valid_hostname_normal(self):
+ self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
+
+ def test_valid_hostname_ipv4addr(self):
+ self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1"))
+
+ def test_valid_hostname_ipv6addr(self):
+ self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2"))
+
+ def test_valid_hostname_bad(self):
+ self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
+
+ @mock.patch('nova.utils.execute')
+ def test_create_image(self, mock_execute):
+ libvirt_utils.create_image('raw', '/some/path', '10G')
+ libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
+ expected_args = [(('qemu-img', 'create', '-f', 'raw',
+ '/some/path', '10G'),),
+ (('qemu-img', 'create', '-f', 'qcow2',
+ '/some/stuff', '1234567891234'),)]
+ self.assertEqual(expected_args, mock_execute.call_args_list)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_create_cow_image(self, mock_execute, mock_exists):
+ mock_execute.return_value = ('stdout', None)
+ libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
+ expected_args = [(('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', '/some/path'),),
+ (('qemu-img', 'create', '-f', 'qcow2',
+ '-o', 'backing_file=/some/path',
+ '/the/new/cow'),)]
+ self.assertEqual(expected_args, mock_execute.call_args_list)
+
+ def test_pick_disk_driver_name(self):
+ type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
+ 'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
+ 'xen': ([True, 'phy'], [False, 'tap2'], [None, 'tap2']),
+ 'uml': ([True, None], [False, None], [None, None]),
+ 'lxc': ([True, None], [False, None], [None, None])}
+
+ for (virt_type, checks) in type_map.iteritems():
+ if virt_type == "xen":
+ version = 4001000
+ else:
+ version = 1005001
+
+ self.flags(virt_type=virt_type, group='libvirt')
+ for (is_block_dev, expected_result) in checks:
+ result = libvirt_utils.pick_disk_driver_name(version,
+ is_block_dev)
+ self.assertEqual(result, expected_result)
+
+ def test_pick_disk_driver_name_xen_4_0_0(self):
+ self.flags(virt_type="xen", group='libvirt')
+ result = libvirt_utils.pick_disk_driver_name(4000000, False)
+ self.assertEqual(result, "tap")
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_get_disk_size(self, mock_execute, mock_exists):
+ path = '/some/path'
+ example_output = """image: 00000001
+file format: raw
+virtual size: 4.4M (4592640 bytes)
+disk size: 4.4M
+"""
+ mock_execute.return_value = (example_output, '')
+ self.assertEqual(4592640, disk.get_disk_size('/some/path'))
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+
+ def test_copy_image(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ src_fd, src_path = tempfile.mkstemp()
+ try:
+ with os.fdopen(src_fd, 'w') as fp:
+ fp.write('canary')
+
+ libvirt_utils.copy_image(src_path, dst_path)
+ with open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'canary')
+ finally:
+ os.unlink(src_path)
+ finally:
+ os.unlink(dst_path)
+
+ def test_write_to_file(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ libvirt_utils.write_to_file(dst_path, 'hello')
+ with open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'hello')
+ finally:
+ os.unlink(dst_path)
+
+ def test_write_to_file_with_umask(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+ os.unlink(dst_path)
+
+ libvirt_utils.write_to_file(dst_path, 'hello', umask=0o277)
+ with open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'hello')
+ mode = os.stat(dst_path).st_mode
+ self.assertEqual(mode & 0o277, 0)
+ finally:
+ os.unlink(dst_path)
+
+ @mock.patch.object(utils, 'execute')
+ def test_chown(self, mock_execute):
+ libvirt_utils.chown('/some/path', 'soren')
+ mock_execute.assert_called_once_with('chown', 'soren', '/some/path',
+ run_as_root=True)
+
+ @mock.patch.object(utils, 'execute')
+ def test_chown_for_id_maps(self, mock_execute):
+ id_maps = [vconfig.LibvirtConfigGuestUIDMap(),
+ vconfig.LibvirtConfigGuestUIDMap(),
+ vconfig.LibvirtConfigGuestGIDMap(),
+ vconfig.LibvirtConfigGuestGIDMap()]
+ id_maps[0].target = 10000
+ id_maps[0].count = 2000
+ id_maps[1].start = 2000
+ id_maps[1].target = 40000
+ id_maps[1].count = 2000
+ id_maps[2].target = 10000
+ id_maps[2].count = 2000
+ id_maps[3].start = 2000
+ id_maps[3].target = 40000
+ id_maps[3].count = 2000
+ libvirt_utils.chown_for_id_maps('/some/path', id_maps)
+ execute_args = ('nova-idmapshift', '-i',
+ '-u', '0:10000:2000,2000:40000:2000',
+ '-g', '0:10000:2000,2000:40000:2000',
+ '/some/path')
+ mock_execute.assert_called_once_with(*execute_args, run_as_root=True)
+
+ def _do_test_extract_snapshot(self, mock_execute,
+ dest_format='raw', out_format='raw'):
+ libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
+ '/extracted/snap', dest_format)
+ mock_execute.assert_called_once_with(
+ 'qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
+ '/path/to/disk/image', '/extracted/snap')
+
+ @mock.patch.object(utils, 'execute')
+ def test_extract_snapshot_raw(self, mock_execute):
+ self._do_test_extract_snapshot(mock_execute)
+
+ @mock.patch.object(utils, 'execute')
+ def test_extract_snapshot_iso(self, mock_execute):
+ self._do_test_extract_snapshot(mock_execute, dest_format='iso')
+
+ @mock.patch.object(utils, 'execute')
+ def test_extract_snapshot_qcow2(self, mock_execute):
+ self._do_test_extract_snapshot(mock_execute,
+ dest_format='qcow2', out_format='qcow2')
+
+ def test_load_file(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ # We have a test for write_to_file. If that is sound, this suffices
+ libvirt_utils.write_to_file(dst_path, 'hello')
+ self.assertEqual(libvirt_utils.load_file(dst_path), 'hello')
+ finally:
+ os.unlink(dst_path)
+
+ def test_file_open(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ # We have a test for write_to_file. If that is sound, this suffices
+ libvirt_utils.write_to_file(dst_path, 'hello')
+ with libvirt_utils.file_open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'hello')
+ finally:
+ os.unlink(dst_path)
+
+ def test_get_fs_info(self):
+
+ class FakeStatResult(object):
+
+ def __init__(self):
+ self.f_bsize = 4096
+ self.f_frsize = 4096
+ self.f_blocks = 2000
+ self.f_bfree = 1000
+ self.f_bavail = 900
+ self.f_files = 2000
+ self.f_ffree = 1000
+ self.f_favail = 900
+ self.f_flag = 4096
+ self.f_namemax = 255
+
+ self.path = None
+
+ def fake_statvfs(path):
+ self.path = path
+ return FakeStatResult()
+
+ self.stubs.Set(os, 'statvfs', fake_statvfs)
+
+ fs_info = libvirt_utils.get_fs_info('/some/file/path')
+ self.assertEqual('/some/file/path', self.path)
+ self.assertEqual(8192000, fs_info['total'])
+ self.assertEqual(3686400, fs_info['free'])
+ self.assertEqual(4096000, fs_info['used'])
+
+ @mock.patch('nova.virt.images.fetch_to_raw')
+ def test_fetch_image(self, mock_images):
+ context = 'opaque context'
+ target = '/tmp/targetfile'
+ image_id = '4'
+ user_id = 'fake'
+ project_id = 'fake'
+ libvirt_utils.fetch_image(context, target, image_id,
+ user_id, project_id)
+ mock_images.assert_called_once_with(
+ context, image_id, target, user_id, project_id,
+ max_size=0)
+
    def test_fetch_raw_image(self):
        """Exercise images.fetch_to_raw() with stubbed-out execution.

        Covers: qcow2 images converted to raw, raw images passed
        through with a rename, rejection of images with backing files,
        and enforcement of max_size.
        """

        def fake_execute(*cmd, **kwargs):
            # Record every shelled-out command for later comparison.
            self.executes.append(cmd)
            return None, None

        def fake_rename(old, new):
            self.executes.append(('mv', old, new))

        def fake_unlink(path):
            self.executes.append(('rm', path))

        def fake_rm_on_error(path, remove=None):
            self.executes.append(('rm', '-f', path))

        def fake_qemu_img_info(path):
            class FakeImgInfo(object):
                pass

            # Derive fake image properties from the path name:
            # a '.part' suffix defers to the preceding extension,
            # '.converted' always reports raw, 'backing' in the name
            # adds a backing file, 'big' reports a 2-byte virtual size.
            file_format = path.split('.')[-1]
            if file_format == 'part':
                file_format = path.split('.')[-2]
            elif file_format == 'converted':
                file_format = 'raw'

            if 'backing' in path:
                backing_file = 'backing'
            else:
                backing_file = None

            if 'big' in path:
                virtual_size = 2
            else:
                virtual_size = 1

            FakeImgInfo.file_format = file_format
            FakeImgInfo.backing_file = backing_file
            FakeImgInfo.virtual_size = virtual_size

            return FakeImgInfo()

        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os, 'rename', fake_rename)
        self.stubs.Set(os, 'unlink', fake_unlink)
        self.stubs.Set(images, 'fetch', lambda *_, **__: None)
        self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
        self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error)

        # Since the remove param of fileutils.remove_path_on_error()
        # is initialized at load time, we must provide a wrapper
        # that explicitly resets it to our fake delete_if_exists()
        old_rm_path_on_error = fileutils.remove_path_on_error
        f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
        self.stubs.Set(fileutils, 'remove_path_on_error', f)

        context = 'opaque context'
        image_id = '4'
        user_id = 'fake'
        project_id = 'fake'

        # qcow2 images are converted to raw and moved into place.
        target = 't.qcow2'
        self.executes = []
        expected_commands = [('qemu-img', 'convert', '-O', 'raw',
                              't.qcow2.part', 't.qcow2.converted'),
                             ('rm', 't.qcow2.part'),
                             ('mv', 't.qcow2.converted', 't.qcow2')]
        images.fetch_to_raw(context, image_id, target, user_id, project_id,
                            max_size=1)
        self.assertEqual(self.executes, expected_commands)

        # raw images need no conversion, just a rename.
        target = 't.raw'
        self.executes = []
        expected_commands = [('mv', 't.raw.part', 't.raw')]
        images.fetch_to_raw(context, image_id, target, user_id, project_id)
        self.assertEqual(self.executes, expected_commands)

        # images with a backing file are rejected and cleaned up.
        target = 'backing.qcow2'
        self.executes = []
        expected_commands = [('rm', '-f', 'backing.qcow2.part')]
        self.assertRaises(exception.ImageUnacceptable,
                          images.fetch_to_raw,
                          context, image_id, target, user_id, project_id)
        self.assertEqual(self.executes, expected_commands)

        # images larger than max_size are rejected and cleaned up.
        target = 'big.qcow2'
        self.executes = []
        expected_commands = [('rm', '-f', 'big.qcow2.part')]
        self.assertRaises(exception.FlavorDiskTooSmall,
                          images.fetch_to_raw,
                          context, image_id, target, user_id, project_id,
                          max_size=1)
        self.assertEqual(self.executes, expected_commands)

        del self.executes
+
    def test_get_disk_backing_file(self):
        """get_disk_backing_file() returns the basename of the backing
        file from qemu-img output, preferring the '(actual path: ...)'
        value when one is reported.
        """
        with_actual_path = False

        def fake_execute(*args, **kwargs):
            # Emulate 'qemu-img info' output, with or without the
            # '(actual path: ...)' suffix on the backing file line.
            if with_actual_path:
                return ("some: output\n"
                        "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
                        "...: ...\n"), ''
            else:
                return ("some: output\n"
                        "backing file: /foo/bar/baz\n"
                        "...: ...\n"), ''

        def return_true(*args, **kwargs):
            return True

        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os.path, 'exists', return_true)

        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'baz')
        with_actual_path = True
        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'c')
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
new file mode 100644
index 0000000000..3d64dd5ad0
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -0,0 +1,959 @@
+# Copyright 2012 Nicira, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+from lxml import etree
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.network import linux_net
+from nova.network import model as network_model
+from nova import test
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova import utils
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import vif
+
+CONF = cfg.CONF
+
+
+class LibvirtVifTestCase(test.NoDBTestCase):
+
    # Shared subnet/network fixtures used by the VIF fixtures below.
    gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
    dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
    ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
    subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
                                           dns=[dns_bridge_4],
                                           gateway=gateway_bridge_4,
                                           routes=None,
                                           dhcp_server='191.168.1.1')

    gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
    subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
                                           dns=None,
                                           gateway=gateway_bridge_6,
                                           ips=None,
                                           routes=None)

    network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
                                           bridge='br0',
                                           label=None,
                                           subnets=[subnet_bridge_4,
                                                    subnet_bridge_6],
                                           bridge_interface='eth0',
                                           vlan=99)

    vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
                                   address='ca:fe:de:ad:be:ef',
                                   network=network_bridge,
                                   type=network_model.VIF_TYPE_BRIDGE,
                                   devname='tap-xxx-yyy-zzz',
                                   ovs_interfaceid=None)

    network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
                                                   bridge=None,
                                                   label=None,
                                                   subnets=[subnet_bridge_4,
                                                            subnet_bridge_6],
                                                   bridge_interface='eth0',
                                                   vlan=99)

    vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
                                           address='ca:fe:de:ad:be:ef',
                                           network=network_bridge_neutron,
                                           type=None,
                                           devname='tap-xxx-yyy-zzz',
                                           ovs_interfaceid='aaa-bbb-ccc')

    network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
                                        bridge='br0',
                                        label=None,
                                        subnets=[subnet_bridge_4,
                                                 subnet_bridge_6],
                                        bridge_interface=None,
                                        vlan=99)

    network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
                                        bridge='br0',
                                        label=None,
                                        subnets=[subnet_bridge_4,
                                                 subnet_bridge_6],
                                        bridge_interface=None,
                                        vlan=99)

    vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address='ca:fe:de:ad:be:ef',
                                network=network_ovs,
                                type=network_model.VIF_TYPE_OVS,
                                devname='tap-xxx-yyy-zzz',
                                ovs_interfaceid='aaa-bbb-ccc')

    # ovs_hybrid_plug + port_filter: filtering is handled by neutron,
    # so nova must not add its own filterref for this vif.
    vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
                                       address='ca:fe:de:ad:be:ef',
                                       network=network_ovs,
                                       type=network_model.VIF_TYPE_OVS,
                                       details={'ovs_hybrid_plug': True,
                                                'port_filter': True},
                                       devname='tap-xxx-yyy-zzz',
                                       ovs_interfaceid='aaa-bbb-ccc')

    vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
                                           address='ca:fe:de:ad:be:ef',
                                           network=network_ovs,
                                           type=network_model.VIF_TYPE_OVS,
                                           details={'port_filter': True},
                                           devname='tap-xxx-yyy-zzz',
                                           ovs_interfaceid='aaa-bbb-ccc')

    vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
                                       address='ca:fe:de:ad:be:ef',
                                       network=network_ovs,
                                       type=None,
                                       devname=None,
                                       ovs_interfaceid=None)

    vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address='ca:fe:de:ad:be:ef',
                                network=network_ivs,
                                type=network_model.VIF_TYPE_IVS,
                                devname='tap-xxx-yyy-zzz',
                                ovs_interfaceid='aaa-bbb-ccc')

    vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
                                       address='ca:fe:de:ad:be:ef',
                                       network=network_ovs,
                                       type=None,
                                       devname=None,
                                       ovs_interfaceid='aaa')

    vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
                                              address='ca:fe:de:ad:be:ef',
                                              network=network_ivs,
                                              type=network_model.VIF_TYPE_IVS,
                                              details={'port_filter': True},
                                              devname='tap-xxx-yyy-zzz',
                                              ovs_interfaceid='aaa-bbb-ccc')

    vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
                                              address='ca:fe:de:ad:be:ef',
                                              network=network_ivs,
                                              type=network_model.VIF_TYPE_IVS,
                                              details={
                                                  'port_filter': True,
                                                  'ovs_hybrid_plug': True},
                                              devname='tap-xxx-yyy-zzz',
                                              ovs_interfaceid='aaa-bbb-ccc')

    # A vif with no type: the generic driver must refuse to configure it.
    vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
                                 address='ca:fe:de:ad:be:ef',
                                 network=network_bridge,
                                 type=None,
                                 devname='tap-xxx-yyy-zzz',
                                 ovs_interfaceid=None)

    network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
                                         bridge=None,
                                         label=None,
                                         subnets=[subnet_bridge_4,
                                                  subnet_bridge_6],
                                         interface='eth0',
                                         vlan=99)

    vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_8021,
                                    type=network_model.VIF_TYPE_802_QBH,
                                    vnic_type=network_model.VNIC_TYPE_DIRECT,
                                    ovs_interfaceid=None,
                                    details={
                                        network_model.VIF_DETAILS_PROFILEID:
                                        'MyPortProfile'},
                                    profile={'pci_vendor_info': '1137:0043',
                                             'pci_slot': '0000:0a:00.1',
                                             'physical_network': 'phynet1'})

    vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
                                   address='ca:fe:de:ad:be:ef',
                                   network=network_8021,
                                   type=network_model.VIF_TYPE_HW_VEB,
                                   vnic_type=network_model.VNIC_TYPE_DIRECT,
                                   ovs_interfaceid=None,
                                   details={
                                       network_model.VIF_DETAILS_VLAN: '100'},
                                   profile={'pci_vendor_info': '1137:0043',
                                            'pci_slot': '0000:0a:00.1',
                                            'physical_network': 'phynet1'})

    vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_8021,
                                    type=network_model.VIF_TYPE_802_QBG,
                                    ovs_interfaceid=None,
                                    qbg_params=network_model.VIF8021QbgParams(
                                        managerid="xxx-yyy-zzz",
                                        typeid="aaa-bbb-ccc",
                                        typeidversion="1",
                                        instanceid="ddd-eee-fff"))

    network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
                                         label=None,
                                         bridge=None,
                                         subnets=[subnet_bridge_4,
                                                  subnet_bridge_6],
                                         interface='eth0')

    network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
                                            label=None,
                                            bridge=None,
                                            subnets=[subnet_bridge_4],
                                            interface='eth0')

    vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
                                 address='ca:fe:de:ad:be:ef',
                                 network=network_mlnx,
                                 type=network_model.VIF_TYPE_MLNX_DIRECT,
                                 devname='tap-xxx-yyy-zzz')

    vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
                                     address='ca:fe:de:ad:be:ef',
                                     network=network_mlnx,
                                     type=network_model.VIF_TYPE_MLNX_DIRECT,
                                     details={'physical_network':
                                              'fake_phy_network'},
                                     devname='tap-xxx-yyy-zzz')

    vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_midonet,
                                    type=network_model.VIF_TYPE_MIDONET,
                                    devname='tap-xxx-yyy-zzz')

    vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
                                    address='ca:fe:de:ad:be:ef',
                                    network=network_bridge,
                                    type=network_model.VIF_TYPE_IOVISOR,
                                    devname='tap-xxx-yyy-zzz',
                                    ovs_interfaceid=None)

    # Minimal instance stand-in; the drivers read name/uuid from it.
    instance = {
        'name': 'instance-name',
        'uuid': 'instance-uuid'
    }

    # Flavor extra specs consumed by _test_model_qemu's bandwidth checks.
    bandwidth = {
        'quota:vif_inbound_peak': '200',
        'quota:vif_outbound_peak': '20',
        'quota:vif_inbound_average': '100',
        'quota:vif_outbound_average': '10',
        'quota:vif_inbound_burst': '300',
        'quota:vif_outbound_burst': '30'
    }
+
+ def setUp(self):
+ super(LibvirtVifTestCase, self).setUp()
+ self.flags(allow_same_net_traffic=True)
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def _get_conn(self, uri="qemu:///session", ver=None):
+ def __inner():
+ if ver is None:
+ return fakelibvirt.Connection(uri, False)
+ else:
+ return fakelibvirt.Connection(uri, False, ver)
+ return __inner
+
+ def _get_node(self, xml):
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ return ret[0]
+
+ def _assertMacEquals(self, node, vif):
+ mac = node.find("mac").get("address")
+ self.assertEqual(mac, vif['address'])
+
+ def _assertTypeEquals(self, node, type, attr, source, br_want,
+ prefix=None):
+ self.assertEqual(node.get("type"), type)
+ br_name = node.find(attr).get(source)
+ if prefix is None:
+ self.assertEqual(br_name, br_want)
+ else:
+ self.assertTrue(br_name.startswith(prefix))
+
+ def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
+ br_want=None, size=0, prefix=None):
+ ret = node.findall("filterref")
+ self.assertEqual(len(ret), size)
+ self._assertTypeEquals(node, type, attr, source, br_want,
+ prefix)
+ self._assertMacEquals(node, vif)
+
+ def _assertModel(self, xml, model_want=None, driver_want=None):
+ node = self._get_node(xml)
+ if model_want is None:
+ ret = node.findall("model")
+ self.assertEqual(len(ret), 0)
+ else:
+ model = node.find("model").get("type")
+ self.assertEqual(model, model_want)
+ if driver_want is None:
+ ret = node.findall("driver")
+ self.assertEqual(len(ret), 0)
+ else:
+ driver = node.find("driver").get("name")
+ self.assertEqual(driver, driver_want)
+
+ def _assertTypeAndPciEquals(self, node, type, vif):
+ self.assertEqual(node.get("type"), type)
+ address = node.find("source").find("address")
+ addr_type = address.get("type")
+ self.assertEqual("pci", addr_type)
+ pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
+ 'domain': address.get("domain")[2:],
+ 'bus': address.get("bus")[2:],
+ 'slot': address.get("slot")[2:],
+ 'func': address.get("function")[2:]}
+
+ pci_slot_want = vif['profile']['pci_slot']
+ self.assertEqual(pci_slot, pci_slot_want)
+
+ def _get_conf(self):
+ conf = vconfig.LibvirtConfigGuest()
+ conf.virt_type = "qemu"
+ conf.name = "fake-name"
+ conf.uuid = "fake-uuid"
+ conf.memory = 100 * 1024
+ conf.vcpus = 4
+ return conf
+
    def _get_instance_xml(self, driver, vif, image_meta=None):
        """Build guest XML containing a single interface for *vif*.

        A throw-away flavor dict, carrying the class bandwidth quotas
        as extra specs, stands in for the instance type.
        """
        default_inst_type = {
            'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
            'name': 'm1.micro', 'deleted': 0, 'created_at': None,
            'ephemeral_gb': 0, 'updated_at': None,
            'disabled': False, 'vcpus': 1,
            'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
            'flavorid': '1', 'vcpu_weight': None, 'id': 2,
            'extra_specs': dict(self.bandwidth)
        }
        conf = self._get_conf()
        nic = driver.get_config(self.instance, vif, image_meta,
                                default_inst_type, CONF.libvirt.virt_type)
        conf.add_device(nic)
        return conf.to_xml()
+
    def test_multiple_nics(self):
        """Guest XML with several interfaces keeps each nic's type, MAC
        and target device distinct.
        """
        conf = self._get_conf()
        # Tests multiple nic configuration and that target_dev is
        # set for each
        nics = [{'net_type': 'bridge',
                 'mac_addr': '00:00:00:00:00:0b',
                 'source_dev': 'b_source_dev',
                 'target_dev': 'b_target_dev'},
                {'net_type': 'ethernet',
                 'mac_addr': '00:00:00:00:00:0e',
                 'source_dev': 'e_source_dev',
                 'target_dev': 'e_target_dev'},
                {'net_type': 'direct',
                 'mac_addr': '00:00:00:00:00:0d',
                 'source_dev': 'd_source_dev',
                 'target_dev': 'd_target_dev'}]

        for nic in nics:
            nic_conf = vconfig.LibvirtConfigGuestInterface()
            nic_conf.net_type = nic['net_type']
            nic_conf.target_dev = nic['target_dev']
            nic_conf.mac_addr = nic['mac_addr']
            nic_conf.source_dev = nic['source_dev']
            conf.add_device(nic_conf)

        xml = conf.to_xml()
        doc = etree.fromstring(xml)
        # Locate each interface by its type and verify its attributes
        # survived the round-trip into XML.
        for nic in nics:
            path = "./devices/interface/[@type='%s']" % nic['net_type']
            node = doc.find(path)
            self.assertEqual(nic['net_type'], node.get("type"))
            self.assertEqual(nic['mac_addr'],
                             node.find("mac").get("address"))
            self.assertEqual(nic['target_dev'],
                             node.find("target").get("dev"))
+
+ def test_model_novirtio(self):
+ self.flags(use_virtio_for_bridges=False,
+ virt_type='kvm',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_bridge)
+ self._assertModel(xml)
+
+ def test_model_kvm(self):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='kvm',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_bridge)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
    def test_model_kvm_qemu_custom(self):
        """hw_vif_model image properties select the NIC model for both
        kvm and qemu, for every supported model.
        """
        for virt in ('kvm', 'qemu'):
            self.flags(use_virtio_for_bridges=True,
                       virt_type=virt,
                       group='libvirt')

            d = vif.LibvirtGenericVIFDriver(self._get_conn())
            supported = (network_model.VIF_MODEL_NE2K_PCI,
                         network_model.VIF_MODEL_PCNET,
                         network_model.VIF_MODEL_RTL8139,
                         network_model.VIF_MODEL_E1000,
                         network_model.VIF_MODEL_SPAPR_VLAN)
            for model in supported:
                image_meta = {'properties': {'hw_vif_model': model}}
                xml = self._get_instance_xml(d, self.vif_bridge,
                                             image_meta)
                self._assertModel(xml, model)
+
+ def test_model_kvm_bogus(self):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='kvm',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ image_meta = {'properties': {'hw_vif_model': 'acme'}}
+ self.assertRaises(exception.UnsupportedHardware,
+ self._get_instance_xml,
+ d,
+ self.vif_bridge,
+ image_meta)
+
    def _test_model_qemu(self, *vif_objs, **kw):
        """For each vif: assert virtio model with the qemu driver and a
        <bandwidth> element matching the flavor's quota extra specs.
        """
        libvirt_version = kw.get('libvirt_version')
        self.flags(use_virtio_for_bridges=True,
                   virt_type='qemu',
                   group='libvirt')

        for vif_obj in vif_objs:
            d = vif.LibvirtGenericVIFDriver(self._get_conn())
            if libvirt_version is not None:
                d.libvirt_version = libvirt_version

            xml = self._get_instance_xml(d, vif_obj)

            doc = etree.fromstring(xml)

            bandwidth = doc.find('./devices/interface/bandwidth')
            self.assertNotEqual(bandwidth, None)

            inbound = bandwidth.find('inbound')
            self.assertEqual(inbound.get("average"),
                             self.bandwidth['quota:vif_inbound_average'])
            self.assertEqual(inbound.get("peak"),
                             self.bandwidth['quota:vif_inbound_peak'])
            self.assertEqual(inbound.get("burst"),
                             self.bandwidth['quota:vif_inbound_burst'])

            outbound = bandwidth.find('outbound')
            self.assertEqual(outbound.get("average"),
                             self.bandwidth['quota:vif_outbound_average'])
            self.assertEqual(outbound.get("peak"),
                             self.bandwidth['quota:vif_outbound_peak'])
            self.assertEqual(outbound.get("burst"),
                             self.bandwidth['quota:vif_outbound_burst'])

            self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
+
+ def test_model_qemu_no_firewall(self):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ self._test_model_qemu(
+ self.vif_bridge,
+ self.vif_8021qbg,
+ self.vif_iovisor,
+ self.vif_mlnx,
+ self.vif_ovs,
+ )
+
+ def test_model_qemu_iptables(self):
+ self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+ self._test_model_qemu(
+ self.vif_bridge,
+ self.vif_ovs,
+ self.vif_ivs,
+ self.vif_8021qbg,
+ self.vif_iovisor,
+ self.vif_mlnx,
+ )
+
+ def test_model_xen(self):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='xen',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn("xen:///system"))
+ xml = self._get_instance_xml(d, self.vif_bridge)
+ self._assertModel(xml)
+
+ def test_generic_driver_none(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.assertRaises(exception.NovaException,
+ self._get_instance_xml,
+ d,
+ self.vif_none)
+
+ def _check_bridge_driver(self, d, vif, br_want):
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_bridge, br_want, 1)
+
+ def test_generic_driver_bridge(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self._check_bridge_driver(d,
+ self.vif_bridge,
+ self.vif_bridge['network']['bridge'])
+
+ def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs, prefix=dev_prefix)
+ script = node.find("script").get("path")
+ self.assertEqual(script, "")
+
+ def test_unplug_ivs_ethernet(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
+ delete.side_effect = processutils.ProcessExecutionError
+ d.unplug_ivs_ethernet(None, self.vif_ovs)
+
    def test_plug_ovs_hybrid(self):
        """plug_ovs_hybrid() builds the qbr linux bridge, the veth pair
        and the OVS port when none of the devices exist yet.
        """
        # Expected interactions, keyed by the mocked callable.
        calls = {
            'device_exists': [mock.call('qbrvif-xxx-yyy'),
                              mock.call('qvovif-xxx-yyy')],
            '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
                                            'qvovif-xxx-yyy')],
            'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
                                  run_as_root=True),
                        mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
                                  run_as_root=True),
                        mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
                                  run_as_root=True),
                        mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
                                          '/bridge/multicast_snooping'),
                                  process_input='0', run_as_root=True,
                                  check_exit_code=[0, 1]),
                        mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
                                  run_as_root=True),
                        mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
                                  'qvbvif-xxx-yyy', run_as_root=True)],
            'create_ovs_vif_port': [mock.call('br0',
                                              'qvovif-xxx-yyy', 'aaa-bbb-ccc',
                                              'ca:fe:de:ad:be:ef',
                                              'instance-uuid')]
        }
        with contextlib.nested(
                mock.patch.object(linux_net, 'device_exists',
                                  return_value=False),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(linux_net, '_create_veth_pair'),
                mock.patch.object(linux_net, 'create_ovs_vif_port')
        ) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.plug_ovs_hybrid(self.instance, self.vif_ovs)
            device_exists.assert_has_calls(calls['device_exists'])
            _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
            execute.assert_has_calls(calls['execute'])
            create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
+
    def test_unplug_ovs_hybrid(self):
        """unplug_ovs_hybrid() tears down bridge membership, the qbr
        bridge itself and the OVS port when the bridge exists.
        """
        calls = {
            'device_exists': [mock.call('qbrvif-xxx-yyy')],
            'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
                                  'qvbvif-xxx-yyy', run_as_root=True),
                        mock.call('ip', 'link', 'set',
                                  'qbrvif-xxx-yyy', 'down', run_as_root=True),
                        mock.call('brctl', 'delbr',
                                  'qbrvif-xxx-yyy', run_as_root=True)],
            'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
        }
        with contextlib.nested(
                mock.patch.object(linux_net, 'device_exists',
                                  return_value=True),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(linux_net, 'delete_ovs_vif_port')
        ) as (device_exists, execute, delete_ovs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.unplug_ovs_hybrid(None, self.vif_ovs)
            device_exists.assert_has_calls(calls['device_exists'])
            execute.assert_has_calls(calls['execute'])
            delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
    def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
        """When the qbr bridge is already gone, only the OVS port is
        removed (no brctl/ip teardown is attempted).
        """
        calls = {
            'device_exists': [mock.call('qbrvif-xxx-yyy')],
            'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
        }
        with contextlib.nested(
                mock.patch.object(linux_net, 'device_exists',
                                  return_value=False),
                mock.patch.object(linux_net, 'delete_ovs_vif_port')
        ) as (device_exists, delete_ovs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.unplug_ovs_hybrid(None, self.vif_ovs)
            device_exists.assert_has_calls(calls['device_exists'])
            delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
    def test_plug_ivs_hybrid(self):
        """plug_ivs_hybrid() builds the qbr linux bridge, the veth pair
        and the IVS port when none of the devices exist yet.
        """
        # Expected interactions, keyed by the mocked callable.
        calls = {
            'device_exists': [mock.call('qbrvif-xxx-yyy'),
                              mock.call('qvovif-xxx-yyy')],
            '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
                                            'qvovif-xxx-yyy')],
            'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
                                  run_as_root=True),
                        mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
                                  run_as_root=True),
                        mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
                                  run_as_root=True),
                        mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
                                          '/bridge/multicast_snooping'),
                                  process_input='0', run_as_root=True,
                                  check_exit_code=[0, 1]),
                        mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
                                  run_as_root=True),
                        mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
                                  'qvbvif-xxx-yyy', run_as_root=True)],
            'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
                                              'ca:fe:de:ad:be:ef',
                                              'instance-uuid')]
        }
        with contextlib.nested(
                mock.patch.object(linux_net, 'device_exists',
                                  return_value=False),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(linux_net, '_create_veth_pair'),
                mock.patch.object(linux_net, 'create_ivs_vif_port')
        ) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.plug_ivs_hybrid(self.instance, self.vif_ivs)
            device_exists.assert_has_calls(calls['device_exists'])
            _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
            execute.assert_has_calls(calls['execute'])
            create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
+
    def test_unplug_ivs_hybrid(self):
        """unplug_ivs_hybrid() tears down the qbr bridge and removes
        the IVS port.
        """
        calls = {
            'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
                                  'qvbvif-xxx-yyy', run_as_root=True),
                        mock.call('ip', 'link', 'set',
                                  'qbrvif-xxx-yyy', 'down', run_as_root=True),
                        mock.call('brctl', 'delbr',
                                  'qbrvif-xxx-yyy', run_as_root=True)],
            'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
        }
        with contextlib.nested(
                mock.patch.object(utils, 'execute'),
                mock.patch.object(linux_net, 'delete_ivs_vif_port')
        ) as (execute, delete_ivs_vif_port):
            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
            d.unplug_ivs_hybrid(None, self.vif_ivs)
            execute.assert_has_calls(calls['execute'])
            delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
+
+ def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ d.unplug_ivs_hybrid(None, self.vif_ivs)
+
+ def test_unplug_iovisor(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
+ label='mylabel')
+ myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=mynetwork)
+ d.unplug_iovisor(None, myvif)
+
+ @mock.patch('nova.network.linux_net.device_exists')
+ def test_plug_iovisor(self, device_exists):
+ device_exists.return_value = True
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ instance = {
+ 'name': 'instance-name',
+ 'uuid': 'instance-uuid',
+ 'project_id': 'myproject'
+ }
+ d.plug_iovisor(instance, self.vif_ivs)
+
+ def test_unplug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ d.unplug_mlnx_direct(None, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'del-port',
+ 'fake_phy_network',
+ 'ca:fe:de:ad:be:ef',
+ run_as_root=True)
+
+ def test_plug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'add-port',
+ 'ca:fe:de:ad:be:ef',
+ 'instance-uuid',
+ 'fake_phy_network',
+ 'mlnx_direct',
+ 'eth-xxx-yyy-zzz',
+ run_as_root=True)
+
+ def test_plug_mlnx_no_physical_network(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ self.assertRaises(exception.NovaException,
+ d.plug_mlnx_direct,
+ self.instance,
+ self.vif_mlnx)
+ self.assertEqual(0, execute.call_count)
+
+ def test_ivs_ethernet_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ self._check_ivs_ethernet_driver(d,
+ self.vif_ivs,
+ "tap")
+
    def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
        """Verify an IVS vif renders as an ethernet device named after
        its devname.

        NOTE(review): want_iface_id is currently unused -- the IVS XML
        carries no <virtualport> element to assert on; the parameter is
        kept for signature symmetry with _check_ovs_virtualport_driver.
        """
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
                                     vif, vif['devname'])
+
    def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
        """Verify an OVS vif renders as a bridge interface carrying an
        openvswitch <virtualport> whose interfaceid is *want_iface_id*.
        """
        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
        xml = self._get_instance_xml(d, vif)
        node = self._get_node(xml)
        self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
                                     vif, "br0")
        vp = node.find("virtualport")
        self.assertEqual(vp.get("type"), "openvswitch")
        # The interfaceid may appear on any of several <parameters>
        # elements; it must appear at least once with the wanted value.
        iface_id_found = False
        for p_elem in vp.findall("parameters"):
            iface_id = p_elem.get("interfaceid", None)
            if iface_id:
                self.assertEqual(iface_id, want_iface_id)
                iface_id_found = True

        self.assertTrue(iface_id_found)
+
+ def test_generic_ovs_virtualport_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
+ want_iface_id = self.vif_ovs['ovs_interfaceid']
+ self._check_ovs_virtualport_driver(d,
+ self.vif_ovs,
+ want_iface_id)
+
+ def test_generic_ivs_virtualport_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
+ want_iface_id = self.vif_ivs['ovs_interfaceid']
+ self._check_ivs_virtualport_driver(d,
+ self.vif_ivs,
+ want_iface_id)
+
+ def test_ivs_plug_with_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ xml = self._get_instance_xml(d, self.vif_ivs)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ivs, br_want, 1)
+
+ def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ivs_filter_hybrid, br_want, 0)
+
+ def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = self.vif_ivs_filter_direct['devname']
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs_filter_direct, br_want, 0)
+
+ def test_hybrid_plug_without_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ovs_hybrid['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ovs_hybrid, br_want, 0)
+
+    def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
+        # Direct plug of an OVS VIF that advertises the port-filter
+        # capability: expect a plain tap/ethernet target, no qbr bridge.
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        # NOTE(review): the expected device name is read from vif_midonet
+        # while the VIF under test is vif_ovs_filter_cap -- presumably the
+        # two fixtures share the same 'devname'; confirm against the fixture
+        # definitions, or read it from vif_ovs_filter_cap directly.
+        br_want = self.vif_midonet['devname']
+        xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
+        node = self._get_node(xml)
+        self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
+                                     self.vif_ovs_filter_cap, br_want)
+
+ def _check_neutron_hybrid_driver(self, d, vif, br_want):
+ self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ vif, br_want, 1)
+
+ def test_generic_hybrid_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ovs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_neutron_hybrid_driver(d,
+ self.vif_ovs,
+ br_want)
+
+ def test_ivs_hybrid_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_neutron_hybrid_driver(d,
+ self.vif_ivs,
+ br_want)
+
+ def test_mlnx_direct_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d,
+ self.vif_mlnx)
+ node = self._get_node(xml)
+ self.assertEqual(node.get("type"), "direct")
+ self._assertTypeEquals(node, "direct", "source",
+ "dev", "eth-xxx-yyy-zzz")
+ self._assertTypeEquals(node, "direct", "source",
+ "mode", "passthrough")
+ self._assertMacEquals(node, self.vif_mlnx)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
+ def test_midonet_ethernet_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ br_want = self.vif_midonet['devname']
+ xml = self._get_instance_xml(d, self.vif_midonet)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_midonet, br_want)
+
+ def test_generic_8021qbh_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_8021qbh)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
+ self._assertMacEquals(node, self.vif_8021qbh)
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbh")
+ profile_id_found = False
+ for p_elem in vp.findall("parameters"):
+ details = self.vif_8021qbh["details"]
+ profile_id = p_elem.get("profileid", None)
+ if profile_id:
+ self.assertEqual(profile_id,
+ details[network_model.VIF_DETAILS_PROFILEID])
+ profile_id_found = True
+
+ self.assertTrue(profile_id_found)
+
+ def test_hw_veb_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_hw_veb)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
+ self._assertMacEquals(node, self.vif_hw_veb)
+ vlan = node.find("vlan").find("tag").get("id")
+ vlan_want = self.vif_hw_veb["details"]["vlan"]
+ self.assertEqual(vlan, vlan_want)
+
+    def test_generic_iovisor_driver(self):
+        # NOTE(review): despite the name, this test drives the self.vif_ivs
+        # fixture, not an iovisor-specific VIF -- presumably the iovisor
+        # plug produces the same ethernet/tap XML; verify, or introduce a
+        # dedicated vif_iovisor fixture.
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+        br_want = self.vif_ivs['devname']
+        xml = self._get_instance_xml(d, self.vif_ivs)
+        node = self._get_node(xml)
+        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+                                     self.vif_ivs, br_want)
+
+ def test_generic_8021qbg_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_8021qbg)
+
+ node = self._get_node(xml)
+ self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
+ self._assertMacEquals(node, self.vif_8021qbg)
+
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbg")
+ manager_id_found = False
+ type_id_found = False
+ typeversion_id_found = False
+ instance_id_found = False
+ for p_elem in vp.findall("parameters"):
+ wantparams = self.vif_8021qbg['qbg_params']
+ manager_id = p_elem.get("managerid", None)
+ type_id = p_elem.get("typeid", None)
+ typeversion_id = p_elem.get("typeidversion", None)
+ instance_id = p_elem.get("instanceid", None)
+ if manager_id:
+ self.assertEqual(manager_id,
+ wantparams['managerid'])
+ manager_id_found = True
+ if type_id:
+ self.assertEqual(type_id,
+ wantparams['typeid'])
+ type_id_found = True
+ if typeversion_id:
+ self.assertEqual(typeversion_id,
+ wantparams['typeidversion'])
+ typeversion_id_found = True
+ if instance_id:
+ self.assertEqual(instance_id,
+ wantparams['instanceid'])
+ instance_id_found = True
+
+ self.assertTrue(manager_id_found)
+ self.assertTrue(type_id_found)
+ self.assertTrue(typeversion_id_found)
+ self.assertTrue(instance_id_found)
diff --git a/nova/tests/unit/virt/libvirt/test_volume.py b/nova/tests/unit/virt/libvirt/test_volume.py
new file mode 100644
index 0000000000..0594161638
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_volume.py
@@ -0,0 +1,1160 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import os
+import time
+
+import fixtures
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.storage import linuxscsi
+from nova import test
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova import utils
+from nova.virt import fake
+from nova.virt.libvirt import utils as libvirt_utils
+from nova.virt.libvirt import volume
+
+CONF = cfg.CONF
+
+
+class LibvirtVolumeTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtVolumeTestCase, self).setUp()
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ class FakeLibvirtDriver(object):
+ def __init__(self, hyperv="QEMU", version=1005001):
+ self.hyperv = hyperv
+ self.version = version
+
+ def _get_hypervisor_version(self):
+ return self.version
+
+ def _get_hypervisor_type(self):
+ return self.hyperv
+
+ def _get_all_block_devices(self):
+ return []
+
+ self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
+ self.connr = {
+ 'ip': '127.0.0.1',
+ 'initiator': 'fake_initiator',
+ 'host': 'fake_host'
+ }
+ self.disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ self.name = 'volume-00000001'
+ self.location = '10.0.2.15:3260'
+ self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
+ self.vol = {'id': 1, 'name': self.name}
+ self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ self.user = 'foo'
+
+ def _assertNetworkAndProtocolEquals(self, tree):
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', self.name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+
+ def _assertFileTypeEquals(self, tree, file_path):
+ self.assertEqual(tree.get('type'), 'file')
+ self.assertEqual(tree.find('./source').get('file'), file_path)
+
+ def _assertDiskInfoEquals(self, tree, disk_info):
+ self.assertEqual(tree.get('device'), disk_info['type'])
+ self.assertEqual(tree.find('./target').get('bus'),
+ disk_info['bus'])
+ self.assertEqual(tree.find('./target').get('dev'),
+ disk_info['dev'])
+
+ def _test_libvirt_volume_driver_disk_info(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertDiskInfoEquals(tree, self.disk_info)
+
+ def test_libvirt_volume_disk_info_type(self):
+ self.disk_info['type'] = 'cdrom'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_disk_info_dev(self):
+ self.disk_info['dev'] = 'hdc'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_disk_info_bus(self):
+ self.disk_info['bus'] = 'scsi'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_driver_serial(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual('fake_serial', tree.find('./serial').text)
+ self.assertIsNone(tree.find('./blockio'))
+
+ def test_libvirt_volume_driver_blockio(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ 'logical_block_size': '4096',
+ 'physical_block_size': '4096',
+ },
+ 'serial': 'fake_serial',
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ blockio = tree.find('./blockio')
+ self.assertEqual('4096', blockio.get('logical_block_size'))
+ self.assertEqual('4096', blockio.get('physical_block_size'))
+
+ def test_libvirt_volume_driver_iotune(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ "device_path": "/foo",
+ 'qos_specs': 'bar',
+ },
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ iotune = tree.find('./iotune')
+ # ensure invalid qos_specs is ignored
+ self.assertIsNone(iotune)
+
+ specs = {
+ 'total_bytes_sec': '102400',
+ 'read_bytes_sec': '51200',
+ 'write_bytes_sec': '0',
+ 'total_iops_sec': '0',
+ 'read_iops_sec': '200',
+ 'write_iops_sec': '200',
+ }
+ del connection_info['data']['qos_specs']
+ connection_info['data'].update(dict(qos_specs=specs))
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
+ self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
+ self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
+ self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
+ self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
+ self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
+
+ def test_libvirt_volume_driver_readonly(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ "device_path": "/foo",
+ 'access_mode': 'bar',
+ },
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ self.assertRaises(exception.InvalidVolumeAccessMode,
+ libvirt_driver.get_config,
+ connection_info, self.disk_info)
+
+ connection_info['data']['access_mode'] = 'rw'
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ readonly = tree.find('./readonly')
+ self.assertIsNone(readonly)
+
+ connection_info['data']['access_mode'] = 'ro'
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ readonly = tree.find('./readonly')
+ self.assertIsNotNone(readonly)
+
+    def iscsi_connection(self, volume, location, iqn):
+        """Build a Cinder-style iSCSI connection_info dict for *volume*.
+
+        device_path follows the udev by-path naming convention for LUN 1;
+        qos_specs carries sample iotune values picked up by get_config.
+        """
+        dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
+        dev_path = '/dev/disk/by-path/%s' % (dev_name)
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': {
+                'volume_id': volume['id'],
+                'target_portal': location,
+                'target_iqn': iqn,
+                'target_lun': 1,
+                'device_path': dev_path,
+                'qos_specs': {
+                    'total_bytes_sec': '102400',
+                    'read_iops_sec': '200',
+                }
+            }
+        }
+
+ def test_rescan_multipath(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver._rescan_multipath()
+ expected_multipath_cmd = ('multipath', '-r')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_iscsiadm_discover_parsing(self):
+ # Ensure that parsing iscsiadm discover ignores cruft.
+
+ targets = [
+ ["192.168.204.82:3260,1",
+ ("iqn.2010-10.org.openstack:volume-"
+ "f9b12623-6ce3-4dac-a71f-09ad4249bdd3")],
+ ["192.168.204.82:3261,1",
+ ("iqn.2010-10.org.openstack:volume-"
+ "f9b12623-6ce3-4dac-a71f-09ad4249bdd4")]]
+
+ # This slight wonkiness brought to you by pep8, as the actual
+ # example output runs about 97 chars wide.
+ sample_input = """Loading iscsi modules: done
+Starting iSCSI initiator service: done
+Setting up iSCSI targets: unused
+%s %s
+%s %s
+""" % (targets[0][0], targets[0][1], targets[1][0], targets[1][1])
+ driver = volume.LibvirtISCSIVolumeDriver("none")
+ out = driver._get_target_portals_from_iscsiadm_output(sample_input)
+ self.assertEqual(out, targets)
+
+ def test_libvirt_iscsi_driver(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location),
+ ('iscsiadm', '-m', 'session'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--rescan'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'manual'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--logout'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'delete')]
+ self.assertEqual(self.executes, expected_commands)
+
+    def test_libvirt_iscsi_driver_still_in_use(self):
+        # When another block device on the same target (lun-2) is still in
+        # use, disconnect must NOT log out of the session; instead only the
+        # per-LUN device is deleted via the sysfs 'delete' knob.
+        # NOTE(vish) exists is to make driver assume connecting worked
+        self.stubs.Set(os.path, 'exists', lambda x: True)
+        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+        devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
+                                                            self.iqn)]
+        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+        # NOTE(review): vol duplicates self.vol built in setUp; could reuse it.
+        vol = {'id': 1, 'name': self.name}
+        connection_info = self.iscsi_connection(vol, self.location, self.iqn)
+        libvirt_driver.connect_volume(connection_info, self.disk_info)
+        dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
+        libvirt_driver.disconnect_volume(connection_info, "vde")
+        # Connect sequence, then (no logout/delete of the node) only the
+        # sysfs device removal for lun-1.
+        expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
+                              '-p', self.location),
+                             ('iscsiadm', '-m', 'session'),
+                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
+                              '-p', self.location, '--login'),
+                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
+                              '-p', self.location, '--op', 'update',
+                              '-n', 'node.startup', '-v', 'automatic'),
+                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
+                              '-p', self.location, '--rescan'),
+                             ('cp', '/dev/stdin',
+                              '/sys/block/%s/device/delete' % dev_name)]
+        self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_iscsi_driver_disconnect_multipath_error(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
+ self.iqn)]
+ with contextlib.nested(
+ mock.patch.object(os.path, 'exists', return_value=True),
+ mock.patch.object(self.fake_conn, '_get_all_block_devices',
+ return_value=devs),
+ mock.patch.object(libvirt_driver, '_rescan_multipath'),
+ mock.patch.object(libvirt_driver, '_run_multipath'),
+ mock.patch.object(libvirt_driver, '_get_multipath_device_name',
+ return_value='/dev/mapper/fake-multipath-devname'),
+ mock.patch.object(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ return_value=[('fake-ip', 'fake-portal')]),
+ mock.patch.object(libvirt_driver, '_get_multipath_iqn',
+ return_value='fake-portal'),
+ ) as (mock_exists, mock_devices, mock_rescan_multipath,
+ mock_run_multipath, mock_device_name, mock_get_portals,
+ mock_get_iqn):
+ mock_run_multipath.side_effect = processutils.ProcessExecutionError
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+
+ libvirt_driver.use_multipath = True
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ mock_run_multipath.assert_called_once_with(
+ ['-f', 'fake-multipath-devname'],
+ check_exit_code=[0, 1])
+
+ def test_libvirt_iscsi_driver_get_config(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
+ dev_path = '/dev/disk/by-path/%s' % (dev_name)
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(dev_path, tree.find('./source').get('dev'))
+
+ libvirt_driver.use_multipath = True
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(dev_path, tree.find('./source').get('dev'))
+
+ def test_libvirt_iscsi_driver_multipath_id(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver.use_multipath = True
+ self.stubs.Set(libvirt_driver, '_run_iscsiadm_bare',
+ lambda x, check_exit_code: ('',))
+ self.stubs.Set(libvirt_driver, '_rescan_iscsi', lambda: None)
+ self.stubs.Set(libvirt_driver, '_get_host_device', lambda x: None)
+ self.stubs.Set(libvirt_driver, '_rescan_multipath', lambda: None)
+ fake_multipath_id = 'fake_multipath_id'
+ fake_multipath_device = '/dev/mapper/%s' % fake_multipath_id
+ self.stubs.Set(libvirt_driver, '_get_multipath_device_name',
+ lambda x: fake_multipath_device)
+
+ def fake_disconnect_volume_multipath_iscsi(iscsi_properties,
+ multipath_device):
+ if fake_multipath_device != multipath_device:
+ raise Exception('Invalid multipath_device.')
+
+ self.stubs.Set(libvirt_driver, '_disconnect_volume_multipath_iscsi',
+ fake_disconnect_volume_multipath_iscsi)
+ with mock.patch.object(os.path, 'exists', return_value=True):
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info,
+ self.disk_info)
+ self.assertEqual(fake_multipath_id,
+ connection_info['data']['multipath_id'])
+ libvirt_driver.disconnect_volume(connection_info, "fake")
+
+ def test_sanitize_log_run_iscsiadm(self):
+ # Tests that the parameters to the _run_iscsiadm function are sanitized
+ # for passwords when logged.
+ def fake_debug(*args, **kwargs):
+ self.assertIn('node.session.auth.password', args[0])
+ self.assertNotIn('scrubme', args[0])
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ iscsi_properties = connection_info['data']
+ with mock.patch.object(volume.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ libvirt_driver._iscsiadm_update(iscsi_properties,
+ 'node.session.auth.password',
+ 'scrubme')
+ # we don't care what the log message is, we just want to make sure
+ # our stub method is called which asserts the password is scrubbed
+ self.assertTrue(debug_mock.called)
+
+ def iser_connection(self, volume, location, iqn):
+ return {
+ 'driver_volume_type': 'iser',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ }
+ }
+
+ def sheepdog_connection(self, volume):
+ return {
+ 'driver_volume_type': 'sheepdog',
+ 'data': {
+ 'name': volume['name']
+ }
+ }
+
+ def test_libvirt_sheepdog_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.sheepdog_connection(self.vol)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
+ self.assertEqual(tree.find('./source').get('name'), self.name)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def rbd_connection(self, volume):
+ return {
+ 'driver_volume_type': 'rbd',
+ 'data': {
+ 'name': '%s/%s' % ('rbd', volume['name']),
+ 'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
+ 'auth_username': CONF.libvirt.rbd_user,
+ 'secret_type': 'ceph',
+ 'secret_uuid': CONF.libvirt.rbd_secret_uuid,
+ 'qos_specs': {
+ 'total_bytes_sec': '1048576',
+ 'read_iops_sec': '500',
+ }
+ }
+ }
+
+ def test_libvirt_rbd_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./source/auth'))
+ self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text)
+ self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_hosts(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ hosts = ['example.com', '1.2.3.4', '::1']
+ ports = [None, '6790', '6791']
+ connection_info['data']['hosts'] = hosts
+ connection_info['data']['ports'] = ports
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./source/auth'))
+ found_hosts = tree.findall('./source/host')
+ self.assertEqual([host.get('name') for host in found_hosts], hosts)
+ self.assertEqual([host.get('port') for host in found_hosts], ports)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), self.user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid,
+ group='libvirt')
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./auth'))
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ # NOTE: Supplying the rbd_secret_uuid will enable authentication
+ # locally in nova-compute even if not enabled in nova-volume/cinder
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid,
+ group='libvirt')
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_kvm_volume(self):
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
+ self.iqn)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath(self):
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ connection_info['data']['device_path'] = mpdev_filepath
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[self.location, self.iqn]])
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver._get_multipath_iqn = lambda x: self.iqn
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+ expected_multipath_cmd = ('multipath', '-f', 'foo')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_libvirt_kvm_volume_with_multipath_still_in_use(self):
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ mpdev_filepath = '/dev/mapper/foo'
+
+ def _get_multipath_device_name(path):
+ if '%s-lun-1' % iqn in path:
+ return mpdev_filepath
+ return '/dev/mapper/donotdisconnect'
+
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver._get_multipath_device_name =\
+ lambda x: _get_multipath_device_name(x)
+
+ block_devs = ['/dev/disks/by-path/%s-iscsi-%s-lun-2' % (location, iqn)]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices',
+ lambda: block_devs)
+
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ connection_info['data']['device_path'] = mpdev_filepath
+
+ libvirt_driver._get_multipath_iqn = lambda x: iqn
+
+ iscsi_devs = ['1.2.3.4-iscsi-%s-lun-1' % iqn,
+ '%s-iscsi-%s-lun-1' % (location, iqn),
+ '%s-iscsi-%s-lun-2' % (location, iqn)]
+ libvirt_driver._get_iscsi_devices = lambda: iscsi_devs
+
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[location, iqn]])
+
+ # Set up disconnect volume mock expectations
+ self.mox.StubOutWithMock(libvirt_driver, '_delete_device')
+ self.mox.StubOutWithMock(libvirt_driver, '_rescan_multipath')
+ libvirt_driver._rescan_multipath()
+ libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[0])
+ libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[1])
+ libvirt_driver._rescan_multipath()
+
+ # Ensure that the mpath devices are deleted
+ self.mox.ReplayAll()
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath_getmpdev(self):
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ name0 = 'volume-00000000'
+ iqn0 = 'iqn.2010-10.org.openstack:%s' % name0
+ dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (self.location, iqn0)
+ dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
+ self.iqn)
+ devs = [dev0, dev]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
    def test_libvirt_kvm_iser_volume_with_multipath(self):
        # iSER attach/detach with iser_use_multipath enabled; here the
        # connection_info already carries the multipath device_path.
        self.flags(iser_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        # Avoid real delays from any retry/settle loops in the driver.
        self.stubs.Set(time, 'sleep', lambda x: None)
        devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
        name = 'volume-00000001'
        location = '10.0.2.15:3260'
        iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_info = self.iser_connection(vol, location, iqn)
        mpdev_filepath = '/dev/mapper/foo'
        connection_info['data']['device_path'] = mpdev_filepath
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [[location, iqn]])
        libvirt_driver.connect_volume(connection_info, disk_info)
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
        libvirt_driver._get_multipath_iqn = lambda x: iqn
        libvirt_driver.disconnect_volume(connection_info, 'vde')
        # Disconnecting must flush the multipath map ('foo').
        expected_multipath_cmd = ('multipath', '-f', 'foo')
        self.assertIn(expected_multipath_cmd, self.executes)
+
    def test_libvirt_kvm_iser_volume_with_multipath_getmpdev(self):
        # Same as the previous test, but the multipath device is discovered
        # via _get_multipath_device_name instead of being pre-set in the
        # connection_info.
        self.flags(iser_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.stubs.Set(time, 'sleep', lambda x: None)
        libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
        name0 = 'volume-00000000'
        location0 = '10.0.2.15:3260'
        iqn0 = 'iqn.2010-10.org.iser.openstack:%s' % name0
        dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location0, iqn0)
        name = 'volume-00000001'
        location = '10.0.2.15:3260'
        iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
        devs = [dev0, dev]
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        self.stubs.Set(libvirt_driver, '_get_iscsi_devices', lambda: [])
        connection_info = self.iser_connection(vol, location, iqn)
        mpdev_filepath = '/dev/mapper/foo'
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [['fake_portal1', 'fake_iqn1']])
        libvirt_driver.connect_volume(connection_info, disk_info)
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
        libvirt_driver.disconnect_volume(connection_info, 'vde')
+
    def test_libvirt_nfs_driver(self):
        # NOTE(vish) exists is to make driver assume connecting worked
        mnt_base = '/mnt'
        self.flags(nfs_mount_point_base=mnt_base, group='libvirt')

        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        # Pretend the share is not mounted yet so connect_volume issues
        # the mkdir + mount commands.
        self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)

        export_string = '192.168.1.1:/nfs/share1'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))

        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        libvirt_driver.disconnect_volume(connection_info, "vde")

        # connect_volume must record the volume file path inside the
        # hashed mount point directory.
        device_path = os.path.join(export_mnt_base,
                                   connection_info['data']['name'])
        self.assertEqual(device_path, connection_info['data']['device_path'])
        expected_commands = [
            ('mkdir', '-p', export_mnt_base),
            ('mount', '-t', 'nfs', export_string, export_mnt_base),
            ('umount', export_mnt_base)]
        self.assertEqual(expected_commands, self.executes)
+
    @mock.patch.object(volume.utils, 'execute')
    @mock.patch.object(volume.LOG, 'debug')
    @mock.patch.object(volume.LOG, 'exception')
    def test_libvirt_nfs_driver_umount_error(self, mock_LOG_exception,
                                        mock_LOG_debug, mock_utils_exe):
        # 'device is busy' / 'target is busy' umount failures are tolerated
        # and logged at debug level; any other umount failure is logged via
        # LOG.exception.
        export_string = '192.168.1.1:/nfs/share1'
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        mock_utils_exe.side_effect = processutils.ProcessExecutionError(
            None, None, None, 'umount', 'umount: device is busy.')
        libvirt_driver.disconnect_volume(connection_info, "vde")
        self.assertTrue(mock_LOG_debug.called)
        mock_utils_exe.side_effect = processutils.ProcessExecutionError(
            None, None, None, 'umount', 'umount: target is busy.')
        libvirt_driver.disconnect_volume(connection_info, "vde")
        self.assertTrue(mock_LOG_debug.called)
        mock_utils_exe.side_effect = processutils.ProcessExecutionError(
            None, None, None, 'umount', 'umount: Other error.')
        libvirt_driver.disconnect_volume(connection_info, "vde")
        self.assertTrue(mock_LOG_exception.called)
+
    def test_libvirt_nfs_driver_get_config(self):
        # get_config for an NFS volume should produce a file-backed disk
        # with the 'raw' driver type pointing inside the mount point.
        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        mnt_base = '/mnt'
        self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
        export_string = '192.168.1.1:/nfs/share1'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))
        file_path = os.path.join(export_mnt_base, self.name)

        connection_info = {'data': {'export': export_string,
                                    'name': self.name,
                                    'device_path': file_path}}
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        self.assertEqual('raw', tree.find('./driver').get('type'))
+
+ def test_libvirt_nfs_driver_already_mounted(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('findmnt', '--target', export_mnt_base, '--source',
+ export_string),
+ ('umount', export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
+
    def test_libvirt_nfs_driver_with_opts(self):
        # Mount options supplied via connection_info['data']['options']
        # must be forwarded to the mount command.
        mnt_base = '/mnt'
        self.flags(nfs_mount_point_base=mnt_base, group='libvirt')

        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
        export_string = '192.168.1.1:/nfs/share1'
        options = '-o intr,nfsvers=3'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))

        connection_info = {'data': {'export': export_string,
                                    'name': self.name,
                                    'options': options}}
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        libvirt_driver.disconnect_volume(connection_info, "vde")

        expected_commands = [
            ('mkdir', '-p', export_mnt_base),
            ('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
             export_string, export_mnt_base),
            ('umount', export_mnt_base),
        ]
        self.assertEqual(expected_commands, self.executes)
+
+ def aoe_connection(self, shelf, lun):
+ aoedev = 'e%s.%s' % (shelf, lun)
+ aoedevpath = '/dev/etherd/%s' % (aoedev)
+ return {
+ 'driver_volume_type': 'aoe',
+ 'data': {
+ 'target_shelf': shelf,
+ 'target_lun': lun,
+ 'device_path': aoedevpath
+ }
+ }
+
    @mock.patch('os.path.exists', return_value=True)
    def test_libvirt_aoe_driver(self, exists):
        libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
        shelf = '100'
        lun = '1'
        connection_info = self.aoe_connection(shelf, lun)
        aoedev = 'e%s.%s' % (shelf, lun)
        aoedevpath = '/dev/etherd/%s' % (aoedev)
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        # The driver checks for the AoE device node under /dev/etherd.
        exists.assert_called_with(aoedevpath)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        self.assertEqual(aoedevpath, connection_info['data']['device_path'])
        # Connecting revalidates the AoE device; disconnect runs nothing.
        expected_commands = [('aoe-revalidate', aoedev)]
        self.assertEqual(expected_commands, self.executes)
+
    def test_libvirt_aoe_driver_get_config(self):
        # AoE volumes are exposed as block devices at
        # /dev/etherd/e<shelf>.<lun>.
        libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
        shelf = '100'
        lun = '1'
        connection_info = self.aoe_connection(shelf, lun)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        aoedevpath = '/dev/etherd/e%s.%s' % (shelf, lun)
        self.assertEqual('block', tree.get('type'))
        self.assertEqual(aoedevpath, tree.find('./source').get('dev'))
        libvirt_driver.disconnect_volume(connection_info, "vde")
+
    def test_libvirt_glusterfs_driver(self):
        # Basic Gluster fuse-mount attach/detach: mkdir + mount on connect,
        # umount on disconnect.
        mnt_base = '/mnt'
        self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')

        libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
        # Pretend the share is not mounted yet.
        self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
        export_string = '192.168.1.1:/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))

        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        libvirt_driver.disconnect_volume(connection_info, "vde")

        device_path = os.path.join(export_mnt_base,
                                   connection_info['data']['name'])
        self.assertEqual(device_path, connection_info['data']['device_path'])
        expected_commands = [
            ('mkdir', '-p', export_mnt_base),
            ('mount', '-t', 'glusterfs', export_string, export_mnt_base),
            ('umount', export_mnt_base)]
        self.assertEqual(expected_commands, self.executes)
+
    def test_libvirt_glusterfs_driver_get_config(self):
        # get_config should honour the optional 'format' key, defaulting
        # to raw when it is absent.
        mnt_base = '/mnt'
        self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')

        libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
        export_string = '192.168.1.1:/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))
        file_path = os.path.join(export_mnt_base, self.name)

        # Test default format - raw
        connection_info = {'data': {'export': export_string,
                                    'name': self.name,
                                    'device_path': file_path}}
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        self.assertEqual('raw', tree.find('./driver').get('type'))

        # Test specified format - qcow2
        connection_info = {'data': {'export': export_string,
                                    'name': self.name,
                                    'device_path': file_path,
                                    'format': 'qcow2'}}
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        self.assertEqual('qcow2', tree.find('./driver').get('type'))
+
+ def test_libvirt_glusterfs_driver_already_mounted(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('findmnt', '--target', export_mnt_base,
+ '--source', export_string),
+ ('umount', export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_glusterfs_driver_with_opts(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ options = '-o backupvolfile-server=192.168.1.2'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'options': options}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'glusterfs',
+ '-o', 'backupvolfile-server=192.168.1.2',
+ export_string, export_mnt_base),
+ ('umount', export_mnt_base),
+ ]
+ self.assertEqual(self.executes, expected_commands)
+
    def test_libvirt_glusterfs_libgfapi(self):
        # With 'gluster' in qemu_allowed_storage_drivers the volume is
        # accessed natively via libgfapi: a network-type disk pointing at
        # the gluster host, instead of a fuse mount.
        self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
        libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
        self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
        export_string = '192.168.1.1:/volume-00001'
        name = 'volume-00001'

        connection_info = {'data': {'export': export_string, 'name': name}}

        disk_info = {
            "dev": "vde",
            "type": "disk",
            "bus": "virtio",
        }

        libvirt_driver.connect_volume(connection_info, disk_info)
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.get('type'), 'network')
        self.assertEqual(tree.find('./driver').get('type'), 'raw')

        source = tree.find('./source')
        self.assertEqual(source.get('protocol'), 'gluster')
        self.assertEqual(source.get('name'), 'volume-00001/volume-00001')
        self.assertEqual(source.find('./host').get('name'), '192.168.1.1')
        # 24007 is the standard gluster management port.
        self.assertEqual(source.find('./host').get('port'), '24007')

        libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def fibrechan_connection(self, volume, location, wwn):
+ return {
+ 'driver_volume_type': 'fibrechan',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_wwn': wwn,
+ 'target_lun': 1,
+ }
+ }
+
    def test_libvirt_fibrechan_driver(self):
        self.stubs.Set(libvirt_utils, 'get_fc_hbas',
                       fake_libvirt_utils.get_fc_hbas)
        self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
                       fake_libvirt_utils.get_fc_hbas_info)
        # NOTE(vish) exists is to make driver assume connecting worked
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb')
        libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
        multipath_devname = '/dev/md-1'
        devices = {"device": multipath_devname,
                   "id": "1234567890",
                   "devices": [{'device': '/dev/sdb',
                                'address': '1:0:0:1',
                                'host': 1, 'channel': 0,
                                'id': 0, 'lun': 1}]}
        self.stubs.Set(linuxscsi, 'find_multipath_device', lambda x: devices)
        self.stubs.Set(linuxscsi, 'remove_device', lambda x: None)
        # Should work for string, unicode, and list
        wwns = ['1234567890123456', unicode('1234567890123456'),
                ['1234567890123456', '1234567890123457']]
        for wwn in wwns:
            connection_info = self.fibrechan_connection(self.vol,
                                                        self.location, wwn)
            mount_device = "vde"
            libvirt_driver.connect_volume(connection_info, self.disk_info)

            # Test the scenario where multipath_id is returned
            libvirt_driver.disconnect_volume(connection_info, mount_device)
            self.assertEqual(multipath_devname,
                             connection_info['data']['device_path'])
            expected_commands = []
            self.assertEqual(expected_commands, self.executes)
            # Test the scenario where multipath_id is not returned
            connection_info["data"]["devices"] = devices["devices"]
            del connection_info["data"]["multipath_id"]
            libvirt_driver.disconnect_volume(connection_info, mount_device)
            expected_commands = []
            self.assertEqual(expected_commands, self.executes)

        # Should not work for anything other than string, unicode, and list
        connection_info = self.fibrechan_connection(self.vol,
                                                    self.location, 123)
        self.assertRaises(exception.NovaException,
                          libvirt_driver.connect_volume,
                          connection_info, self.disk_info)

        # With no FC HBAs present, connect_volume must also fail.
        self.stubs.Set(libvirt_utils, 'get_fc_hbas', lambda: [])
        self.stubs.Set(libvirt_utils, 'get_fc_hbas_info', lambda: [])
        self.assertRaises(exception.NovaException,
                          libvirt_driver.connect_volume,
                          connection_info, self.disk_info)
+
    def test_libvirt_fibrechan_driver_get_config(self):
        # FC volumes are passed through to the guest as host block devices.
        libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
        connection_info = self.fibrechan_connection(self.vol,
                                                    self.location, 123)
        connection_info['data']['device_path'] = ("/sys/devices/pci0000:00"
            "/0000:00:03.0/0000:05:00.3/host2/fc_host/host2")
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertEqual('block', tree.get('type'))
        self.assertEqual(connection_info['data']['device_path'],
                         tree.find('./source').get('dev'))
+
    def test_libvirt_fibrechan_getpci_num(self):
        # _get_pci_num should return the PCI address segment immediately
        # preceding the hostN component, i.e. the last one in the chain.
        libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
        hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
                              "/0000:05:00.3/host2/fc_host/host2"}
        pci_num = libvirt_driver._get_pci_num(hba)
        self.assertEqual("0000:05:00.3", pci_num)

        hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
                              "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"}
        pci_num = libvirt_driver._get_pci_num(hba)
        self.assertEqual("0000:06:00.6", pci_num)
+
    def test_libvirt_scality_driver(self):
        # Scality SOFS attach: the volume is a file under the SOFS mount
        # point at <mount>/<sofs_path>.
        tempdir = self.useFixture(fixtures.TempDir()).path
        TEST_MOUNT = os.path.join(tempdir, 'fake_mount')
        TEST_CONFIG = os.path.join(tempdir, 'fake_config')
        TEST_VOLDIR = 'volumes'
        TEST_VOLNAME = 'volume_name'
        TEST_CONN_INFO = {
            'data': {
                'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
            }
        }
        TEST_VOLPATH = os.path.join(TEST_MOUNT,
                                    TEST_VOLDIR,
                                    TEST_VOLNAME)
        # Create an empty config file and a fake already-mounted tree.
        open(TEST_CONFIG, "w+").close()
        os.makedirs(os.path.join(TEST_MOUNT, 'sys'))

        def _access_wrapper(path, flags):
            # Pretend the mount.sofs helper binary is installed; fall back
            # to the real os.access for everything else.
            if path == '/sbin/mount.sofs':
                return True
            else:
                return os.access(path, flags)

        self.stubs.Set(os, 'access', _access_wrapper)
        self.flags(scality_sofs_config=TEST_CONFIG,
                   scality_sofs_mount_point=TEST_MOUNT,
                   group='libvirt')
        driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
        driver.connect_volume(TEST_CONN_INFO, self.disk_info)

        device_path = os.path.join(TEST_MOUNT,
                                   TEST_CONN_INFO['data']['sofs_path'])
        self.assertEqual(device_path,
                         TEST_CONN_INFO['data']['device_path'])

        conf = driver.get_config(TEST_CONN_INFO, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, TEST_VOLPATH)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
new file mode 100644
index 0000000000..f71438eae2
--- /dev/null
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -0,0 +1,684 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import block_device
+from nova import context
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import matchers
+from nova.virt import block_device as driver_block_device
+from nova.virt import driver
+from nova.volume import cinder
+from nova.volume import encryptors
+
+
class TestDriverBlockDevice(test.NoDBTestCase):
    """Tests for the driver BDM wrapper classes in nova.virt.block_device.

    For each source type there are three fixtures: the DB-style
    BlockDeviceDict, the expected driver-format dict and the expected
    legacy driver-format dict derived from it.
    """

    # Maps fixture-name prefix -> wrapper class under test.
    driver_classes = {
        'swap': driver_block_device.DriverSwapBlockDevice,
        'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
        'volume': driver_block_device.DriverVolumeBlockDevice,
        'snapshot': driver_block_device.DriverSnapshotBlockDevice,
        'image': driver_block_device.DriverImageBlockDevice,
        'blank': driver_block_device.DriverBlankBlockDevice
    }

    # Swap fixtures.
    swap_bdm = block_device.BlockDeviceDict(
        {'id': 1, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdb1',
         'source_type': 'blank',
         'destination_type': 'local',
         'delete_on_termination': True,
         'guest_format': 'swap',
         'disk_bus': 'scsi',
         'volume_size': 2,
         'boot_index': -1})

    swap_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2,
        'disk_bus': 'scsi'}

    swap_legacy_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2}

    # Ephemeral fixtures.
    ephemeral_bdm = block_device.BlockDeviceDict(
        {'id': 2, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdc1',
         'source_type': 'blank',
         'destination_type': 'local',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 4,
         'guest_format': 'ext4',
         'delete_on_termination': True,
         'boot_index': -1})

    ephemeral_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'device_type': 'disk',
        'guest_format': 'ext4',
        'disk_bus': 'scsi'}

    ephemeral_legacy_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'virtual_name': 'ephemeral0',
        'num': 0}

    # Volume fixtures.
    volume_bdm = block_device.BlockDeviceDict(
        {'id': 3, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda1',
         'source_type': 'volume',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 8,
         'destination_type': 'volume',
         'volume_id': 'fake-volume-id-1',
         'guest_format': 'ext4',
         'connection_info': '{"fake": "connection_info"}',
         'delete_on_termination': False,
         'boot_index': 0})

    volume_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': 'ext4',
        'boot_index': 0}

    volume_legacy_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False}

    # Snapshot fixtures.
    snapshot_bdm = block_device.BlockDeviceDict(
        {'id': 4, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    snapshot_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    snapshot_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    # Image fixtures.
    image_bdm = block_device.BlockDeviceDict(
        {'id': 5, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 1,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'image',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'image_id': 'fake-image-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    image_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    image_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    # Blank fixtures.
    blank_bdm = block_device.BlockDeviceDict(
        {'id': 6, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'blank',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    blank_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    blank_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
+
    def setUp(self):
        super(TestDriverBlockDevice, self).setUp()
        # mox mocks for the cinder API and the compute driver; expectations
        # are recorded per-test and verified automatically at tearDown.
        self.volume_api = self.mox.CreateMock(cinder.API)
        self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
        self.context = context.RequestContext('fake_user',
                                              'fake_project')
+
    def test_no_device_raises(self):
        # A BDM flagged 'no_device' must not be transformable by any of
        # the driver classes.
        for name, cls in self.driver_classes.items():
            self.assertRaises(driver_block_device._NotTransformable,
                              cls, {'no_device': True})
+
    def _test_driver_device(self, name):
        # Generic round-trip check for one driver BDM class: driver-format
        # dict, field passthrough, legacy format, proxied attributes,
        # type exclusivity, and save() behaviour.
        db_bdm = getattr(self, "%s_bdm" % name)
        test_bdm = self.driver_classes[name](db_bdm)
        self.assertThat(test_bdm, matchers.DictMatches(
            getattr(self, "%s_driver_bdm" % name)))

        # Every DB field must be reflected on the wrapped object (booleans
        # may have been coerced from ints).
        for k, v in db_bdm.iteritems():
            field_val = getattr(test_bdm._bdm_obj, k)
            if isinstance(field_val, bool):
                v = bool(v)
            self.assertEqual(field_val, v)

        self.assertThat(test_bdm.legacy(),
                        matchers.DictMatches(
                            getattr(self, "%s_legacy_driver_bdm" % name)))

        # Test passthru attributes
        for passthru in test_bdm._proxy_as_attr:
            self.assertEqual(getattr(test_bdm, passthru),
                             getattr(test_bdm._bdm_obj, passthru))

        # Make sure that all others raise _invalidType
        for other_name, cls in self.driver_classes.iteritems():
            if other_name == name:
                continue
            self.assertRaises(driver_block_device._InvalidType,
                              cls,
                              getattr(self, '%s_bdm' % name))

        # Test the save method
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save(self.context)
            for fld, alias in test_bdm._update_on_save.iteritems():
                self.assertEqual(test_bdm[alias or fld],
                                 getattr(test_bdm._bdm_obj, fld))

            save_mock.assert_called_once_with(self.context)

        # Test the save method with no context passed
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save()
            save_mock.assert_called_once_with()
+
    def _test_driver_default_size(self, name):
        # A None or missing volume_size must default to 0 in the driver
        # BDM ('swap_size' for swap devices, 'size' otherwise).
        size = 'swap_size' if name == 'swap' else 'size'
        no_size_bdm = getattr(self, "%s_bdm" % name).copy()
        no_size_bdm['volume_size'] = None

        driver_bdm = self.driver_classes[name](no_size_bdm)
        self.assertEqual(driver_bdm[size], 0)

        del no_size_bdm['volume_size']

        driver_bdm = self.driver_classes[name](no_size_bdm)
        self.assertEqual(driver_bdm[size], 0)
+
    def test_driver_swap_block_device(self):
        # Round-trip the swap fixtures through DriverSwapBlockDevice.
        self._test_driver_device("swap")
+
    def test_driver_swap_default_size(self):
        # Missing swap size must default to 0.
        self._test_driver_default_size('swap')
+
    def test_driver_ephemeral_block_device(self):
        # Round-trip the ephemeral fixtures through
        # DriverEphemeralBlockDevice.
        self._test_driver_device("ephemeral")
+
    def test_driver_ephemeral_default_size(self):
        # Missing ephemeral size must default to 0.
        self._test_driver_default_size('ephemeral')
+
    def test_driver_volume_block_device(self):
        self._test_driver_device("volume")

        # connection_info is stored serialized in the DB and must be
        # exposed deserialized on the driver BDM.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        self.assertEqual(test_bdm['connection_info'],
                         jsonutils.loads(test_bdm._bdm_obj.connection_info))
        self.assertEqual(test_bdm._bdm_obj.id, 3)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
        self.assertEqual(test_bdm.volume_size, 8)
+
    def test_driver_snapshot_block_device(self):
        self._test_driver_device("snapshot")

        # snapshot/volume ids and size are proxied from the DB object.
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 4)
        self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
        self.assertEqual(test_bdm.volume_size, 3)
+
    def test_driver_image_block_device(self):
        self._test_driver_device('image')

        # image id and size are proxied from the DB object.
        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 5)
        self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
        self.assertEqual(test_bdm.volume_size, 1)
+
    def test_driver_image_block_device_destination_local(self):
        # An image-backed BDM with a 'local' destination is not a volume
        # and must be rejected by DriverImageBlockDevice.
        self._test_driver_device('image')
        bdm = self.image_bdm.copy()
        bdm['destination_type'] = 'local'
        self.assertRaises(driver_block_device._InvalidType,
                          self.driver_classes['image'], bdm)
+
    def test_driver_blank_block_device(self):
        self._test_driver_device('blank')

        # volume id and size are proxied from the DB object.
        test_bdm = self.driver_classes['blank'](
            self.blank_bdm)
        self.assertEqual(6, test_bdm._bdm_obj.id)
        self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
        self.assertEqual(3, test_bdm.volume_size)
+
    def _test_volume_attach(self, driver_bdm, bdm_dict,
                            fake_volume, check_attach=True,
                            fail_check_attach=False, driver_attach=False,
                            fail_driver_attach=False, volume_attach=True,
                            access_mode='rw'):
        # Record the mox expectations for a volume attach flow and return
        # (instance, expected_conn_info).  Each fail_* flag records
        # expectations only up to the induced failure and then returns
        # early, so no further calls are expected by mox.
        elevated_context = self.context.elevated()
        self.stubs.Set(self.context, 'elevated',
                       lambda: elevated_context)
        self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
        self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'access_mode': access_mode}}
        # attach() is expected to stamp the volume id as 'serial'.
        expected_conn_info = {'data': {'access_mode': access_mode},
                              'serial': fake_volume['id']}
        enc_data = {'fake': 'enc_data'}

        self.volume_api.get(self.context,
                            fake_volume['id']).AndReturn(fake_volume)
        if check_attach:
            if not fail_check_attach:
                self.volume_api.check_attach(self.context, fake_volume,
                                             instance=instance).AndReturn(None)
            else:
                self.volume_api.check_attach(self.context, fake_volume,
                                             instance=instance).AndRaise(
                                                 test.TestingException)
                # Failure: nothing past check_attach should be called.
                return instance, expected_conn_info

        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            elevated_context, fake_volume['id'],
            connector).AndReturn(connection_info)
        if driver_attach:
            encryptors.get_encryption_metadata(
                elevated_context, self.volume_api, fake_volume['id'],
                connection_info).AndReturn(enc_data)
            if not fail_driver_attach:
                self.virt_driver.attach_volume(
                    elevated_context, expected_conn_info, instance,
                    bdm_dict['device_name'],
                    disk_bus=bdm_dict['disk_bus'],
                    device_type=bdm_dict['device_type'],
                    encryption=enc_data).AndReturn(None)
            else:
                self.virt_driver.attach_volume(
                    elevated_context, expected_conn_info, instance,
                    bdm_dict['device_name'],
                    disk_bus=bdm_dict['disk_bus'],
                    device_type=bdm_dict['device_type'],
                    encryption=enc_data).AndRaise(test.TestingException)
                # A failed driver attach must roll back the connection.
                self.volume_api.terminate_connection(
                    elevated_context, fake_volume['id'],
                    expected_conn_info).AndReturn(None)
                return instance, expected_conn_info

        if volume_attach:
            self.volume_api.attach(elevated_context, fake_volume['id'],
                                   'fake_uuid', bdm_dict['device_name'],
                                   mode=access_mode).AndReturn(None)
        driver_bdm._bdm_obj.save(self.context).AndReturn(None)
        return instance, expected_conn_info
+
    def test_volume_attach(self):
        # Happy-path attach: the serial-stamped connection info must be
        # stored on the driver BDM.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume)

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
+
    def test_volume_attach_ro(self):
        # Same as test_volume_attach but with a read-only access mode,
        # which must be propagated through the cinder attach call.
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, access_mode='ro')

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
+
+ def check_volume_attach_check_attach_fails(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1'}
+
+ instance, _ = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, fail_check_attach=True)
+ self.mox.ReplayAll()
+
+ self.asserRaises(test.TestingException, test_bdm.attach, self.context,
+ instance, self.volume_api, self.virt_driver)
+
+    def test_volume_no_volume_attach(self):
+        """Skipping both check_attach and driver attach still saves conn info."""
+        test_bdm = self.driver_classes['volume'](
+            self.volume_bdm)
+        volume = {'id': 'fake-volume-id-1',
+                  'attach_status': 'detached'}
+
+        instance, expected_conn_info = self._test_volume_attach(
+            test_bdm, self.volume_bdm, volume, check_attach=False,
+            driver_attach=False)
+
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance,
+                        self.volume_api, self.virt_driver,
+                        do_check_attach=False, do_driver_attach=False)
+        self.assertThat(test_bdm['connection_info'],
+                        matchers.DictMatches(expected_conn_info))
+
+    def test_volume_attach_no_check_driver_attach(self):
+        """Driver attach without the Cinder check still records conn info."""
+        test_bdm = self.driver_classes['volume'](
+            self.volume_bdm)
+        volume = {'id': 'fake-volume-id-1',
+                  'attach_status': 'detached'}
+
+        instance, expected_conn_info = self._test_volume_attach(
+            test_bdm, self.volume_bdm, volume, check_attach=False,
+            driver_attach=True)
+
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance,
+                        self.volume_api, self.virt_driver,
+                        do_check_attach=False, do_driver_attach=True)
+        self.assertThat(test_bdm['connection_info'],
+                        matchers.DictMatches(expected_conn_info))
+
+ def check_volume_attach_driver_attach_fails(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1'}
+
+ instance, _ = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, fail_check_attach=True)
+ self.mox.ReplayAll()
+
+ self.asserRaises(test.TestingException, test_bdm.attach, self.context,
+ instance, self.volume_api, self.virt_driver,
+ do_driver_attach=True)
+
+    def test_refresh_connection(self):
+        """refresh_connection_info() re-initializes and persists conn info."""
+        test_bdm = self.driver_classes['snapshot'](
+            self.snapshot_bdm)
+
+        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
+        connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
+        # 'serial' is expected to be added from the BDM's volume id.
+        expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
+                              'serial': 'fake-volume-id-2'}
+
+        self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
+
+        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
+        self.volume_api.initialize_connection(
+            self.context, test_bdm.volume_id,
+            connector).AndReturn(connection_info)
+        test_bdm._bdm_obj.save(self.context).AndReturn(None)
+
+        self.mox.ReplayAll()
+
+        test_bdm.refresh_connection_info(self.context, instance,
+                                         self.volume_api, self.virt_driver)
+        self.assertThat(test_bdm['connection_info'],
+                        matchers.DictMatches(expected_conn_info))
+
+    def test_snapshot_attach_no_volume(self):
+        """A snapshot BDM with no volume creates one from the snapshot first."""
+        no_volume_snapshot = self.snapshot_bdm.copy()
+        no_volume_snapshot['volume_id'] = None
+        test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
+
+        snapshot = {'id': 'fake-volume-id-1',
+                    'attach_status': 'detached'}
+        volume = {'id': 'fake-volume-id-2',
+                  'attach_status': 'detached'}
+
+        wait_func = self.mox.CreateMockAnything()
+
+        self.volume_api.get_snapshot(self.context,
+                                     'fake-snapshot-id-1').AndReturn(snapshot)
+        self.volume_api.create(self.context, 3,
+                               '', '', snapshot).AndReturn(volume)
+        # attach() is expected to wait for the new volume to become available.
+        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
+        instance, expected_conn_info = self._test_volume_attach(
+            test_bdm, no_volume_snapshot, volume)
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance, self.volume_api,
+                        self.virt_driver, wait_func)
+        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+    def test_snapshot_attach_volume(self):
+        """A snapshot BDM that already has a volume attaches it directly."""
+        test_bdm = self.driver_classes['snapshot'](
+            self.snapshot_bdm)
+
+        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+
+        volume_class = self.driver_classes['volume']
+        self.mox.StubOutWithMock(volume_class, 'attach')
+
+        # Make sure these are not called
+        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
+        self.mox.StubOutWithMock(self.volume_api, 'create')
+
+        volume_class.attach(self.context, instance, self.volume_api,
+                            self.virt_driver, do_check_attach=True
+                            ).AndReturn(None)
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance, self.volume_api,
+                        self.virt_driver)
+        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+    def test_image_attach_no_volume(self):
+        """An image BDM with no volume creates one from the image first."""
+        no_volume_image = self.image_bdm.copy()
+        no_volume_image['volume_id'] = None
+        test_bdm = self.driver_classes['image'](no_volume_image)
+
+        image = {'id': 'fake-image-id-1'}
+        volume = {'id': 'fake-volume-id-2',
+                  'attach_status': 'detached'}
+
+        wait_func = self.mox.CreateMockAnything()
+
+        self.volume_api.create(self.context, 1,
+                               '', '', image_id=image['id']).AndReturn(volume)
+        # attach() is expected to wait for the new volume to become available.
+        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
+        instance, expected_conn_info = self._test_volume_attach(
+            test_bdm, no_volume_image, volume)
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance, self.volume_api,
+                        self.virt_driver, wait_func)
+        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+    def test_image_attach_volume(self):
+        """An image BDM that already has a volume attaches it directly."""
+        test_bdm = self.driver_classes['image'](
+            self.image_bdm)
+
+        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+
+        volume_class = self.driver_classes['volume']
+        self.mox.StubOutWithMock(volume_class, 'attach')
+
+        # Make sure these are not called
+        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
+        self.mox.StubOutWithMock(self.volume_api, 'create')
+
+        volume_class.attach(self.context, instance, self.volume_api,
+                            self.virt_driver, do_check_attach=True
+                            ).AndReturn(None)
+        self.mox.ReplayAll()
+
+        test_bdm.attach(self.context, instance, self.volume_api,
+                        self.virt_driver)
+        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+    def test_blank_attach_volume(self):
+        """A blank BDM creates a fresh blank volume and attaches it."""
+        no_blank_volume = self.blank_bdm.copy()
+        no_blank_volume['volume_id'] = None
+        test_bdm = self.driver_classes['blank'](no_blank_volume)
+        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
+                                                   **{'uuid': 'fake-uuid'})
+        volume_class = self.driver_classes['volume']
+        volume = {'id': 'fake-volume-id-2',
+                  'display_name': 'fake-uuid-blank-vol'}
+
+        with contextlib.nested(
+            mock.patch.object(self.volume_api, 'create', return_value=volume),
+            mock.patch.object(volume_class, 'attach')
+        ) as (vol_create, vol_attach):
+            test_bdm.attach(self.context, instance, self.volume_api,
+                            self.virt_driver)
+
+            # The blank volume is named after the instance uuid.
+            vol_create.assert_called_once_with(self.context,
+                                               test_bdm.volume_size,
+                                               'fake-uuid-blank-vol',
+                                               '')
+            vol_attach.assert_called_once_with(self.context, instance,
+                                               self.volume_api,
+                                               self.virt_driver,
+                                               do_check_attach=True)
+            self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+
+    def test_convert_block_devices(self):
+        """_convert_block_devices keeps only BDMs matching the driver class."""
+        converted = driver_block_device._convert_block_devices(
+            self.driver_classes['volume'],
+            [self.volume_bdm, self.ephemeral_bdm])
+        self.assertEqual(converted, [self.volume_driver_bdm])
+
+    def test_legacy_block_devices(self):
+        """legacy_block_devices() converts driver BDMs to the legacy format."""
+        test_snapshot = self.driver_classes['snapshot'](
+            self.snapshot_bdm)
+
+        block_device_mapping = [test_snapshot, test_snapshot]
+        legacy_bdm = driver_block_device.legacy_block_devices(
+            block_device_mapping)
+        self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
+                                      self.snapshot_legacy_driver_bdm])
+
+        # Test that the ephemerals work as expected: each one is numbered
+        # sequentially and gets a matching 'ephemeralN' virtual_name.
+        test_ephemerals = [self.driver_classes['ephemeral'](
+            self.ephemeral_bdm) for _ in xrange(2)]
+        expected = [self.ephemeral_legacy_driver_bdm.copy()
+                    for _ in xrange(2)]
+        expected[0]['virtual_name'] = 'ephemeral0'
+        expected[0]['num'] = 0
+        expected[1]['virtual_name'] = 'ephemeral1'
+        expected[1]['num'] = 1
+        legacy_ephemerals = driver_block_device.legacy_block_devices(
+            test_ephemerals)
+        self.assertEqual(expected, legacy_ephemerals)
+
+    def test_get_swap(self):
+        """get_swap() returns the swap device, or None when there is none."""
+        swap = [self.swap_driver_bdm]
+        legacy_swap = [self.swap_legacy_driver_bdm]
+        no_swap = [self.volume_driver_bdm]
+
+        self.assertEqual(swap[0], driver_block_device.get_swap(swap))
+        self.assertEqual(legacy_swap[0],
+                         driver_block_device.get_swap(legacy_swap))
+        self.assertIsNone(driver_block_device.get_swap(no_swap))
+        self.assertIsNone(driver_block_device.get_swap([]))
+
+    def test_is_implemented(self):
+        """All standard BDM types are implemented; local images are not."""
+        for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
+                    self.ephemeral_bdm, self.snapshot_bdm):
+            self.assertTrue(driver_block_device.is_implemented(bdm))
+        local_image = self.image_bdm.copy()
+        local_image['destination_type'] = 'local'
+        self.assertFalse(driver_block_device.is_implemented(local_image))
+
+    def test_is_block_device_mapping(self):
+        """Volume-backed types count as BDMs; swap and ephemeral do not."""
+        test_swap = self.driver_classes['swap'](self.swap_bdm)
+        test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
+        test_image = self.driver_classes['image'](self.image_bdm)
+        test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
+        test_volume = self.driver_classes['volume'](self.volume_bdm)
+        test_blank = self.driver_classes['blank'](self.blank_bdm)
+
+        for bdm in (test_image, test_snapshot, test_volume, test_blank):
+            self.assertTrue(driver_block_device.is_block_device_mapping(
+                bdm._bdm_obj))
+
+        for bdm in (test_swap, test_ephemeral):
+            self.assertFalse(driver_block_device.is_block_device_mapping(
+                bdm._bdm_obj))
diff --git a/nova/tests/unit/virt/test_configdrive.py b/nova/tests/unit/virt/test_configdrive.py
new file mode 100644
index 0000000000..b8dc717b80
--- /dev/null
+++ b/nova/tests/unit/virt/test_configdrive.py
@@ -0,0 +1,30 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import strutils
+
+from nova import test
+from nova.virt import configdrive
+
+
+class ConfigDriveTestCase(test.NoDBTestCase):
+    """Exercise configdrive.required_by() against force_config_drive values."""
+
+    def test_valid_string_values(self):
+        # Every oslo truthy string plus 'always' forces a config drive.
+        for value in (strutils.TRUE_STRINGS + ('always',)):
+            self.flags(force_config_drive=value)
+            self.assertTrue(configdrive.required_by({}))
+
+    def test_invalid_string_values(self):
+        # Falsy strings and unrecognized values do not force a config drive.
+        for value in (strutils.FALSE_STRINGS + ('foo',)):
+            self.flags(force_config_drive=value)
+            self.assertFalse(configdrive.required_by({}))
diff --git a/nova/tests/unit/virt/test_diagnostics.py b/nova/tests/unit/virt/test_diagnostics.py
new file mode 100644
index 0000000000..f3969fc09f
--- /dev/null
+++ b/nova/tests/unit/virt/test_diagnostics.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova import test
+from nova.virt import diagnostics
+
+
+class DiagnosticsTests(test.NoDBTestCase):
+    """Unit tests for the virt-driver diagnostics model objects.
+
+    Covers default values, constructor arguments, the add_cpu/add_nic/
+    add_disk helpers, serialize() output, and input validation of the
+    classes in nova.virt.diagnostics.
+    """
+
+    def test_cpu_diagnostics_default(self):
+        cpu = diagnostics.CpuDiagnostics()
+        self.assertEqual(0, cpu.time)
+
+    def test_cpu_diagnostics(self):
+        cpu = diagnostics.CpuDiagnostics(time=7)
+        self.assertEqual(7, cpu.time)
+
+    def test_nic_diagnostics_default(self):
+        nic = diagnostics.NicDiagnostics()
+        self.assertEqual('00:00:00:00:00:00', nic.mac_address)
+        self.assertEqual(0, nic.rx_octets)
+        self.assertEqual(0, nic.rx_errors)
+        self.assertEqual(0, nic.rx_drop)
+        self.assertEqual(0, nic.rx_packets)
+        self.assertEqual(0, nic.tx_octets)
+        self.assertEqual(0, nic.tx_errors)
+        self.assertEqual(0, nic.tx_drop)
+        self.assertEqual(0, nic.tx_packets)
+
+    def test_nic_diagnostics(self):
+        nic = diagnostics.NicDiagnostics(mac_address='00:00:ca:fe:00:00',
+            rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4,
+            tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8)
+        self.assertEqual('00:00:ca:fe:00:00', nic.mac_address)
+        self.assertEqual(1, nic.rx_octets)
+        self.assertEqual(2, nic.rx_errors)
+        self.assertEqual(3, nic.rx_drop)
+        self.assertEqual(4, nic.rx_packets)
+        self.assertEqual(5, nic.tx_octets)
+        self.assertEqual(6, nic.tx_errors)
+        self.assertEqual(7, nic.tx_drop)
+        self.assertEqual(8, nic.tx_packets)
+
+    def test_disk_diagnostics_default(self):
+        disk = diagnostics.DiskDiagnostics()
+        self.assertEqual('', disk.id)
+        self.assertEqual(0, disk.read_bytes)
+        self.assertEqual(0, disk.read_requests)
+        self.assertEqual(0, disk.write_bytes)
+        self.assertEqual(0, disk.write_requests)
+        self.assertEqual(0, disk.errors_count)
+
+    def test_disk_diagnostics(self):
+        disk = diagnostics.DiskDiagnostics(id='fake_disk_id',
+            read_bytes=1, read_requests=2,
+            write_bytes=3, write_requests=4,
+            errors_count=5)
+        self.assertEqual('fake_disk_id', disk.id)
+        self.assertEqual(1, disk.read_bytes)
+        self.assertEqual(2, disk.read_requests)
+        self.assertEqual(3, disk.write_bytes)
+        self.assertEqual(4, disk.write_requests)
+        self.assertEqual(5, disk.errors_count)
+
+    def test_memory_diagnostics_default(self):
+        memory = diagnostics.MemoryDiagnostics()
+        self.assertEqual(0, memory.maximum)
+        self.assertEqual(0, memory.used)
+
+    def test_memory_diagnostics(self):
+        memory = diagnostics.MemoryDiagnostics(maximum=1, used=2)
+        self.assertEqual(1, memory.maximum)
+        self.assertEqual(2, memory.used)
+
+    def test_diagnostics_default(self):
+        diags = diagnostics.Diagnostics()
+        self.assertIsNone(diags.state)
+        self.assertIsNone(diags.driver)
+        self.assertIsNone(diags.hypervisor_os)
+        self.assertEqual(0, diags.uptime)
+        self.assertFalse(diags.config_drive)
+        self.assertEqual([], diags.cpu_details)
+        self.assertEqual([], diags.nic_details)
+        self.assertEqual([], diags.disk_details)
+        self.assertEqual(0, diags.memory_details.maximum)
+        self.assertEqual(0, diags.memory_details.used)
+        self.assertEqual('1.0', diags.version)
+
+    def test_diagnostics(self):
+        cpu_details = [diagnostics.CpuDiagnostics()]
+        nic_details = [diagnostics.NicDiagnostics()]
+        disk_details = [diagnostics.DiskDiagnostics()]
+        diags = diagnostics.Diagnostics(
+            state='fake-state', driver='fake-driver',
+            hypervisor_os='fake-os',
+            uptime=1, cpu_details=cpu_details,
+            nic_details=nic_details, disk_details=disk_details,
+            config_drive=True)
+        self.assertEqual('fake-state', diags.state)
+        self.assertEqual('fake-driver', diags.driver)
+        self.assertEqual('fake-os', diags.hypervisor_os)
+        self.assertEqual(1, diags.uptime)
+        self.assertTrue(diags.config_drive)
+        self.assertEqual(1, len(diags.cpu_details))
+        self.assertEqual(1, len(diags.nic_details))
+        self.assertEqual(1, len(diags.disk_details))
+        self.assertEqual(0, diags.memory_details.maximum)
+        self.assertEqual(0, diags.memory_details.used)
+        self.assertEqual('1.0', diags.version)
+
+    def test_add_cpu(self):
+        diags = diagnostics.Diagnostics()
+        self.assertEqual([], diags.cpu_details)
+        diags.add_cpu(time=7)
+        self.assertEqual(1, len(diags.cpu_details))
+        self.assertEqual(7, diags.cpu_details[0].time)
+
+    def test_add_nic(self):
+        diags = diagnostics.Diagnostics()
+        self.assertEqual([], diags.nic_details)
+        diags.add_nic(mac_address='00:00:ca:fe:00:00',
+                      rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4,
+                      tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8)
+        self.assertEqual(1, len(diags.nic_details))
+        self.assertEqual('00:00:ca:fe:00:00', diags.nic_details[0].mac_address)
+        self.assertEqual(1, diags.nic_details[0].rx_octets)
+        self.assertEqual(2, diags.nic_details[0].rx_errors)
+        self.assertEqual(3, diags.nic_details[0].rx_drop)
+        self.assertEqual(4, diags.nic_details[0].rx_packets)
+        self.assertEqual(5, diags.nic_details[0].tx_octets)
+        self.assertEqual(6, diags.nic_details[0].tx_errors)
+        self.assertEqual(7, diags.nic_details[0].tx_drop)
+        self.assertEqual(8, diags.nic_details[0].tx_packets)
+
+    def test_add_disk(self):
+        diags = diagnostics.Diagnostics()
+        self.assertEqual([], diags.disk_details)
+        diags.add_disk(id='fake_disk_id',
+                       read_bytes=1, read_requests=2,
+                       write_bytes=3, write_requests=4,
+                       errors_count=5)
+        self.assertEqual(1, len(diags.disk_details))
+        self.assertEqual('fake_disk_id', diags.disk_details[0].id)
+        self.assertEqual(1, diags.disk_details[0].read_bytes)
+        self.assertEqual(2, diags.disk_details[0].read_requests)
+        self.assertEqual(3, diags.disk_details[0].write_bytes)
+        self.assertEqual(4, diags.disk_details[0].write_requests)
+        self.assertEqual(5, diags.disk_details[0].errors_count)
+
+    def test_diagnostics_serialize_default(self):
+        diags = diagnostics.Diagnostics()
+        expected = {'config_drive': False,
+                    'cpu_details': [],
+                    'disk_details': [],
+                    'driver': None,
+                    'hypervisor_os': None,
+                    'memory_details': {'maximum': 0, 'used': 0},
+                    'nic_details': [],
+                    'state': None,
+                    'uptime': 0,
+                    'version': '1.0'}
+        result = diags.serialize()
+        self.assertEqual(expected, result)
+
+    def test_diagnostics_serialize(self):
+        cpu_details = [diagnostics.CpuDiagnostics()]
+        nic_details = [diagnostics.NicDiagnostics()]
+        disk_details = [diagnostics.DiskDiagnostics()]
+        diags = diagnostics.Diagnostics(
+            state='fake-state', driver='fake-driver',
+            hypervisor_os='fake-os',
+            uptime=1, cpu_details=cpu_details,
+            nic_details=nic_details, disk_details=disk_details,
+            config_drive=True)
+        expected = {'config_drive': True,
+                    'cpu_details': [{'time': 0}],
+                    'disk_details': [{'errors_count': 0,
+                                      'id': '',
+                                      'read_bytes': 0,
+                                      'read_requests': 0,
+                                      'write_bytes': 0,
+                                      'write_requests': 0}],
+                    'driver': 'fake-driver',
+                    'hypervisor_os': 'fake-os',
+                    'memory_details': {'maximum': 0, 'used': 0},
+                    'nic_details': [{'mac_address': '00:00:00:00:00:00',
+                                     'rx_drop': 0,
+                                     'rx_errors': 0,
+                                     'rx_octets': 0,
+                                     'rx_packets': 0,
+                                     'tx_drop': 0,
+                                     'tx_errors': 0,
+                                     'tx_octets': 0,
+                                     'tx_packets': 0}],
+                    'state': 'fake-state',
+                    'uptime': 1,
+                    'version': '1.0'}
+        result = diags.serialize()
+        self.assertEqual(expected, result)
+
+    def test_diagnostics_invalid_input(self):
+        # The detail lists must be lists of the matching diagnostics type;
+        # anything else raises InvalidInput.
+        self.assertRaises(exception.InvalidInput,
+                          diagnostics.Diagnostics,
+                          cpu_details='invalid type')
+        self.assertRaises(exception.InvalidInput,
+                          diagnostics.Diagnostics,
+                          cpu_details=['invalid entry'])
+        self.assertRaises(exception.InvalidInput,
+                          diagnostics.Diagnostics,
+                          nic_details='invalid type')
+        self.assertRaises(exception.InvalidInput,
+                          diagnostics.Diagnostics,
+                          nic_details=['invalid entry'])
+        self.assertRaises(exception.InvalidInput,
+                          diagnostics.Diagnostics,
+                          disk_details='invalid type')
+        self.assertRaises(exception.InvalidInput,
+                          diagnostics.Diagnostics,
+                          disk_details=['invalid entry'])
diff --git a/nova/tests/unit/virt/test_driver.py b/nova/tests/unit/virt/test_driver.py
new file mode 100644
index 0000000000..572afdedec
--- /dev/null
+++ b/nova/tests/unit/virt/test_driver.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Citrix Systems, Inc.
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt import driver
+
+
+class FakeDriver(object):
+    """Stand-in driver that records its constructor arguments."""
+
+    def __init__(self, *args, **kwargs):
+        self.args = args
+        self.kwargs = kwargs
+
+
+class FakeDriver2(FakeDriver):
+    """Second fake driver class so the registry maps distinct types."""
+    pass
+
+
+class ToDriverRegistryTestCase(test.NoDBTestCase):
+    """Tests for driver.driver_dict_from_config()."""
+
+    def assertDriverInstance(self, inst, class_, *args, **kwargs):
+        # Helper: verify class and that the ctor args were passed through.
+        self.assertEqual(class_, inst.__class__)
+        self.assertEqual(args, inst.args)
+        self.assertEqual(kwargs, inst.kwargs)
+
+    def test_driver_dict_from_config(self):
+        """'key=module.Class' entries become a dict of instantiated drivers."""
+        drvs = driver.driver_dict_from_config(
+            [
+                'key1=nova.tests.unit.virt.test_driver.FakeDriver',
+                'key2=nova.tests.unit.virt.test_driver.FakeDriver2',
+            ], 'arg1', 'arg2', param1='value1', param2='value2'
+        )
+
+        self.assertEqual(
+            sorted(['key1', 'key2']),
+            sorted(drvs.keys())
+        )
+
+        self.assertDriverInstance(
+            drvs['key1'],
+            FakeDriver, 'arg1', 'arg2', param1='value1',
+            param2='value2')
+
+        self.assertDriverInstance(
+            drvs['key2'],
+            FakeDriver2, 'arg1', 'arg2', param1='value1',
+            param2='value2')
diff --git a/nova/tests/unit/virt/test_events.py b/nova/tests/unit/virt/test_events.py
new file mode 100644
index 0000000000..792a8d0453
--- /dev/null
+++ b/nova/tests/unit/virt/test_events.py
@@ -0,0 +1,36 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from nova import test
+from nova.virt import event
+
+
+class TestEvents(test.NoDBTestCase):
+    """Tests for the string representations of virt event objects."""
+
+    def test_event_repr(self):
+        t = time.time()
+        uuid = '1234'
+        lifecycle = event.EVENT_LIFECYCLE_RESUMED
+
+        e = event.Event(t)
+        self.assertEqual(str(e), "<Event: %s>" % t)
+
+        e = event.InstanceEvent(uuid, timestamp=t)
+        self.assertEqual(str(e), "<InstanceEvent: %s, %s>" % (t, uuid))
+
+        e = event.LifecycleEvent(uuid, lifecycle, timestamp=t)
+        self.assertEqual(str(e), "<LifecycleEvent: %s, %s => Resumed>" %
+                         (t, uuid))
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
new file mode 100644
index 0000000000..d0781a6ca7
--- /dev/null
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -0,0 +1,1439 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+import six
+
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base as base_obj
+from nova import test
+from nova.tests.unit import matchers
+from nova.virt import hardware as hw
+
+
+class FakeFlavor(dict):
+    """dict-backed flavor stand-in with vcpus, memory_mb and extra_specs."""
+
+    def __init__(self, vcpus, memory, extra_specs):
+        self['vcpus'] = vcpus
+        self['memory_mb'] = memory
+        self['extra_specs'] = extra_specs
+
+
+class FakeFlavorObject(object):
+    """Flavor stand-in supporting attribute, item and .get() access."""
+
+    def __init__(self, vcpus, memory, extra_specs):
+        self.vcpus = vcpus
+        self.memory_mb = memory
+        self.extra_specs = extra_specs
+
+    def __getitem__(self, item):
+        # dict-style lookup backed by the attributes set above.
+        try:
+            return getattr(self, item)
+        except AttributeError:
+            raise KeyError(item)
+
+    def get(self, item, default=None):
+        try:
+            return getattr(self, item)
+        except AttributeError:
+            return default
+
+
+class CpuSetTestCase(test.NoDBTestCase):
+    """Tests for CPU-set spec parsing and formatting in nova.virt.hardware."""
+
+    def test_get_vcpu_pin_set(self):
+        self.flags(vcpu_pin_set="1-3,5,^2")
+        cpuset_ids = hw.get_vcpu_pin_set()
+        self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+    def test_parse_cpu_spec_none_returns_none(self):
+        self.flags(vcpu_pin_set=None)
+        cpuset_ids = hw.get_vcpu_pin_set()
+        self.assertIsNone(cpuset_ids)
+
+    def test_parse_cpu_spec_valid_syntax_works(self):
+        # Singletons, lists, ranges, exclusions (^N) and arbitrary spacing.
+        cpuset_ids = hw.parse_cpu_spec("1")
+        self.assertEqual(set([1]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec("1,2")
+        self.assertEqual(set([1, 2]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,")
+        self.assertEqual(set([1, 2]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec("1-1")
+        self.assertEqual(set([1]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3")
+        self.assertEqual(set([1, 2, 3]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec("1,^2")
+        self.assertEqual(set([1]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec("1-2, ^1")
+        self.assertEqual(set([2]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec("1-3,5,^2")
+        self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5")
+        self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+        cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1")
+        self.assertEqual(set([]), cpuset_ids)
+
+    def test_parse_cpu_spec_invalid_syntax_raises(self):
+        # Malformed ranges, stray operators, non-numeric tokens and
+        # reversed ranges must all be rejected.
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          " -1-3,5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-3-,5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "-3,5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-,5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-3,5,^2^")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-3,5,^2-")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "--13,^^5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "a-3,5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-a,5,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-3,b,^2")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "1-3,5,^c")
+
+        self.assertRaises(exception.Invalid,
+                          hw.parse_cpu_spec,
+                          "3 - 1, 5 , ^ 2 ")
+
+    def test_format_cpu_spec(self):
+        # Accepts sets or lists; collapses consecutive ids into ranges
+        # unless allow_ranges=False is passed.
+        cpus = set([])
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("", spec)
+
+        cpus = []
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("", spec)
+
+        cpus = set([1, 3])
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("1,3", spec)
+
+        cpus = [1, 3]
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("1,3", spec)
+
+        cpus = set([1, 2, 4, 6])
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("1-2,4,6", spec)
+
+        cpus = [1, 2, 4, 6]
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("1-2,4,6", spec)
+
+        cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
+
+        cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
+        spec = hw.format_cpu_spec(cpus)
+        self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
+
+        cpus = set([1, 2, 4, 6])
+        spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+        self.assertEqual("1,2,4,6", spec)
+
+        cpus = [1, 2, 4, 6]
+        spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+        self.assertEqual("1,2,4,6", spec)
+
+        cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
+        spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+        self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
+
+        cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
+        spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+        self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
+
+
+class VCPUTopologyTest(test.NoDBTestCase):
+
+    def test_validate_config(self):
+        """Flavor/image topology hints combine into (preferred, maximum).
+
+        Each entry pairs flavor extra_specs and image properties with either
+        the expected six-tuple (preferred sockets/cores/threads, then maximum
+        sockets/cores/threads; -1 / 65536 mean "unset") or the exception
+        class raised when the image tries to exceed the flavor limits.
+        """
+        testdata = [
+            {  # Flavor sets preferred topology only
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_sockets": "8",
+                    "hw:cpu_cores": "2",
+                    "hw:cpu_threads": "1",
+                }),
+                "image": {
+                    "properties": {}
+                },
+                "expect": (
+                    8, 2, 1, 65536, 65536, 65536
+                )
+            },
+            {  # Image topology overrides flavor
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_sockets": "8",
+                    "hw:cpu_cores": "2",
+                    "hw:cpu_threads": "1",
+                    "hw:cpu_max_threads": "2",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_sockets": "4",
+                        "hw_cpu_cores": "2",
+                        "hw_cpu_threads": "2",
+                    }
+                },
+                "expect": (
+                    4, 2, 2, 65536, 65536, 2,
+                )
+            },
+            {  # Partial image topology overrides flavor
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_sockets": "8",
+                    "hw:cpu_cores": "2",
+                    "hw:cpu_threads": "1",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_sockets": "2",
+                    }
+                },
+                "expect": (
+                    2, -1, -1, 65536, 65536, 65536,
+                )
+            },
+            {  # Restrict use of threads
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_max_threads": "2",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_max_threads": "1",
+                    }
+                },
+                "expect": (
+                    -1, -1, -1, 65536, 65536, 1,
+                )
+            },
+            {  # Force use of at least two sockets
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_max_cores": "8",
+                    "hw:cpu_max_threads": "1",
+                }),
+                "image": {
+                    "properties": {}
+                },
+                "expect": (
+                    -1, -1, -1, 65536, 8, 1
+                )
+            },
+            {  # Image limits reduce flavor
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_max_cores": "8",
+                    "hw:cpu_max_threads": "1",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_max_cores": "4",
+                    }
+                },
+                "expect": (
+                    -1, -1, -1, 65536, 4, 1
+                )
+            },
+            {  # Image limits kill flavor preferred
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_sockets": "2",
+                    "hw:cpu_cores": "8",
+                    "hw:cpu_threads": "1",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_max_cores": "4",
+                    }
+                },
+                "expect": (
+                    -1, -1, -1, 65536, 4, 65536
+                )
+            },
+            {  # Image limits cannot exceed flavor
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_max_cores": "8",
+                    "hw:cpu_max_threads": "1",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_max_cores": "16",
+                    }
+                },
+                "expect": exception.ImageVCPULimitsRangeExceeded,
+            },
+            {  # Image preferred cannot exceed flavor
+                "flavor": FakeFlavorObject(16, 2048, {
+                    "hw:cpu_max_cores": "8",
+                    "hw:cpu_max_threads": "1",
+                }),
+                "image": {
+                    "properties": {
+                        "hw_cpu_cores": "16",
+                    }
+                },
+                "expect": exception.ImageVCPUTopologyRangeExceeded,
+            },
+        ]
+
+        for topo_test in testdata:
+            if type(topo_test["expect"]) == tuple:
+                (preferred,
+                 maximum) = hw.VirtCPUTopology.get_topology_constraints(
+                     topo_test["flavor"],
+                     topo_test["image"])
+
+                self.assertEqual(topo_test["expect"][0], preferred.sockets)
+                self.assertEqual(topo_test["expect"][1], preferred.cores)
+                self.assertEqual(topo_test["expect"][2], preferred.threads)
+                self.assertEqual(topo_test["expect"][3], maximum.sockets)
+                self.assertEqual(topo_test["expect"][4], maximum.cores)
+                self.assertEqual(topo_test["expect"][5], maximum.threads)
+            else:
+                self.assertRaises(topo_test["expect"],
+                                  hw.VirtCPUTopology.get_topology_constraints,
+                                  topo_test["flavor"],
+                                  topo_test["image"])
+
+    def test_possible_configs(self):
+        """Enumerate candidate [sockets, cores, threads] topologies.
+
+        Each entry gives a vcpu count plus maximum sockets/cores/threads and
+        either the expected list of valid topologies or the exception raised
+        when no topology can satisfy the limits.
+        """
+        testdata = [
+            {
+                "allow_threads": True,
+                "vcpus": 8,
+                "maxsockets": 8,
+                "maxcores": 8,
+                "maxthreads": 2,
+                "expect": [
+                    [8, 1, 1],
+                    [4, 2, 1],
+                    [2, 4, 1],
+                    [1, 8, 1],
+                    [4, 1, 2],
+                    [2, 2, 2],
+                    [1, 4, 2],
+                ]
+            },
+            {
+                "allow_threads": False,
+                "vcpus": 8,
+                "maxsockets": 8,
+                "maxcores": 8,
+                "maxthreads": 2,
+                "expect": [
+                    [8, 1, 1],
+                    [4, 2, 1],
+                    [2, 4, 1],
+                    [1, 8, 1],
+                ]
+            },
+            {
+                "allow_threads": True,
+                "vcpus": 8,
+                "maxsockets": 1024,
+                "maxcores": 1024,
+                "maxthreads": 2,
+                "expect": [
+                    [8, 1, 1],
+                    [4, 2, 1],
+                    [2, 4, 1],
+                    [1, 8, 1],
+                    [4, 1, 2],
+                    [2, 2, 2],
+                    [1, 4, 2],
+                ]
+            },
+            {
+                "allow_threads": True,
+                "vcpus": 8,
+                "maxsockets": 1024,
+                "maxcores": 1,
+                "maxthreads": 2,
+                "expect": [
+                    [8, 1, 1],
+                    [4, 1, 2],
+                ]
+            },
+            {
+                "allow_threads": True,
+                "vcpus": 7,
+                "maxsockets": 8,
+                "maxcores": 8,
+                "maxthreads": 2,
+                "expect": [
+                    [7, 1, 1],
+                    [1, 7, 1],
+                ]
+            },
+            {
+                "allow_threads": True,
+                "vcpus": 8,
+                "maxsockets": 2,
+                "maxcores": 1,
+                "maxthreads": 1,
+                "expect": exception.ImageVCPULimitsRangeImpossible,
+            },
+            {
+                "allow_threads": False,
+                "vcpus": 8,
+                "maxsockets": 2,
+                "maxcores": 1,
+                "maxthreads": 4,
+                "expect": exception.ImageVCPULimitsRangeImpossible,
+            },
+        ]
+
+        for topo_test in testdata:
+            if type(topo_test["expect"]) == list:
+                actual = []
+                for topology in hw.VirtCPUTopology.get_possible_topologies(
+                        topo_test["vcpus"],
+                        hw.VirtCPUTopology(topo_test["maxsockets"],
+                                           topo_test["maxcores"],
+                                           topo_test["maxthreads"]),
+                        topo_test["allow_threads"]):
+                    actual.append([topology.sockets,
+                                   topology.cores,
+                                   topology.threads])
+
+                self.assertEqual(topo_test["expect"], actual)
+            else:
+                self.assertRaises(topo_test["expect"],
+                                  hw.VirtCPUTopology.get_possible_topologies,
+                                  topo_test["vcpus"],
+                                  hw.VirtCPUTopology(topo_test["maxsockets"],
+                                                     topo_test["maxcores"],
+                                                     topo_test["maxthreads"]),
+                                  topo_test["allow_threads"])
+
+ def test_sorting_configs(self):
+ testdata = [
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "sockets": 4,
+ "cores": 2,
+ "threads": 1,
+ "expect": [
+ [4, 2, 1], # score = 2
+ [8, 1, 1], # score = 1
+ [2, 4, 1], # score = 1
+ [1, 8, 1], # score = 1
+ [4, 1, 2], # score = 1
+ [2, 2, 2], # score = 1
+ [1, 4, 2], # score = 1
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1024,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": 4,
+ "threads": -1,
+ "expect": [
+ [2, 4, 1], # score = 1
+ [1, 4, 2], # score = 1
+ [8, 1, 1], # score = 0
+ [4, 2, 1], # score = 0
+ [1, 8, 1], # score = 0
+ [4, 1, 2], # score = 0
+ [2, 2, 2], # score = 0
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": -1,
+ "threads": 2,
+ "expect": [
+ [4, 1, 2], # score = 1
+ [8, 1, 1], # score = 0
+ ]
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": -1,
+ "threads": 2,
+ "expect": [
+ [8, 1, 1], # score = 0
+ ]
+ },
+ ]
+
+ for topo_test in testdata:
+ actual = []
+ possible = hw.VirtCPUTopology.get_possible_topologies(
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
+
+ tops = hw.VirtCPUTopology.sort_possible_topologies(
+ possible,
+ hw.VirtCPUTopology(topo_test["sockets"],
+ topo_test["cores"],
+ topo_test["threads"]))
+ for topology in tops:
+ actual.append([topology.sockets,
+ topology.cores,
+ topology.threads])
+
+ self.assertEqual(topo_test["expect"], actual)
+
+ def test_best_config(self):
+ testdata = [
+ { # Flavor sets preferred topology only
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1"
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [8, 2, 1],
+ },
+ { # Image topology overrides flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_maxthreads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": [4, 2, 2],
+ },
+ { # Image topology overrides flavor
+ "allow_threads": False,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_maxthreads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": [8, 2, 1],
+ },
+ { # Partial image topology overrides flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1"
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "2"
+ }
+ },
+ "expect": [2, 8, 1],
+ },
+ { # Restrict use of threads
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_threads": "1"
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [16, 1, 1]
+ },
+ { # Force use of at least two sockets
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [16, 1, 1]
+ },
+ { # Image limits reduce flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_sockets": "8",
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_sockets": 4,
+ }
+ },
+ "expect": [4, 4, 1]
+ },
+ { # Image limits kill flavor preferred
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "2",
+ "hw:cpu_cores": "8",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": 4,
+ }
+ },
+ "expect": [16, 1, 1]
+ },
+ ]
+
+ for topo_test in testdata:
+ topology = hw.VirtCPUTopology.get_desirable_configs(
+ topo_test["flavor"],
+ topo_test["image"],
+ topo_test["allow_threads"])[0]
+
+ self.assertEqual(topo_test["expect"][0], topology.sockets)
+ self.assertEqual(topo_test["expect"][1], topology.cores)
+ self.assertEqual(topo_test["expect"][2], topology.threads)
+
+
+class NUMATopologyTest(test.NoDBTestCase):
+
+ def test_topology_constraints(self):
+ testdata = [
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ }),
+ "image": {
+ },
+ "expect": None,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCellInstance(
+ 0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([4, 5, 6, 7]), 1024),
+ ]),
+ },
+ {
+ # vcpus is not a multiple of nodes, so it
+ # is an error to not provide cpu/mem mapping
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyAsymmetric,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "4,6",
+ "hw:numa_mem.1": "512",
+ "hw:numa_cpus.2": "5,7",
+ "hw:numa_mem.2": "512",
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCellInstance(
+ 0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([5, 7]), 512),
+ ]),
+ },
+ {
+ # Request a CPU that is out of range
+ # wrt vCPU count
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 1,
+ "hw:numa_cpus.0": "0-16",
+ "hw:numa_mem.0": "2048",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUOutOfRange,
+ },
+ {
+ # Request the same CPU in two nodes
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-7",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "0-7",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUDuplicates,
+ },
+ {
+ # Request with some CPUs not assigned
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-2",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "3-4",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUsUnassigned,
+ },
+ {
+ # Request too little memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "512",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "512",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request too much memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request missing mem.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Request missing cpu.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Image attempts to override flavor
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ }),
+ "image": {
+ "hw_numa_nodes": 4,
+ },
+ "expect": exception.ImageNUMATopologyForbidden,
+ },
+ ]
+
+ for testitem in testdata:
+ if testitem["expect"] is None:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertIsNone(topology)
+ elif type(testitem["expect"]) == type:
+ self.assertRaises(testitem["expect"],
+ hw.VirtNUMAInstanceTopology.get_constraints,
+ testitem["flavor"],
+ testitem["image"])
+ else:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertEqual(len(testitem["expect"].cells),
+ len(topology.cells))
+ for i in range(len(topology.cells)):
+ self.assertEqual(testitem["expect"].cells[i].cpuset,
+ topology.cells[i].cpuset)
+ self.assertEqual(testitem["expect"].cells[i].memory,
+ topology.cells[i].memory)
+
+    def test_can_fit_instances(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512)
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4, 6]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([7, 8]), 256),
+ ])
+
+ self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, []))
+ self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance1]))
+ self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance2]))
+ self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance1, instance2]))
+
+ def test_host_usage_contiguous(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 3)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_host_usage_sparse(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(5, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(6, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(6, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(5, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].id,
+ hostusage.cells[0].id)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].id,
+ hostusage.cells[1].id)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 256)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+    def test_host_usage_cumulative_with_free(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(
+ 0, set([0, 1, 2, 3]), 1024, cpu_usage=2, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([4, 6]), 512, cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 256),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 512),
+ hw.VirtNUMATopologyCellInstance(1, set([3]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([4]), 256)])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1])
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 1024)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 768)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+ # Test freeing of resources
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hostusage, [instance1], free=True)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[1].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_topo_usage_none(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ None, [instance1])
+ self.assertIsNone(hostusage)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [])
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, None)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ def _test_to_dict(self, cell_or_topo, expected):
+ got = cell_or_topo._to_dict()
+ self.assertThat(expected, matchers.DictMatches(got))
+
+ def assertNUMACellMatches(self, expected_cell, got_cell):
+ attrs = ('cpuset', 'memory', 'id')
+ if isinstance(expected_cell, hw.VirtNUMAHostTopology):
+ attrs += ('cpu_usage', 'memory_usage')
+
+ for attr in attrs:
+ self.assertEqual(getattr(expected_cell, attr),
+ getattr(got_cell, attr))
+
+ def _test_cell_from_dict(self, data_dict, expected_cell):
+ cell_class = expected_cell.__class__
+ got_cell = cell_class._from_dict(data_dict)
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def _test_topo_from_dict(self, data_dict, expected_topo):
+ got_topo = expected_topo.__class__._from_dict(
+ data_dict)
+ for got_cell, expected_cell in zip(
+ got_topo.cells, expected_topo.cells):
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def test_numa_cell_dict(self):
+ cell = hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1,
+ 'pagesize': None}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_pagesize_dict(self):
+ cell = hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2]), 512, hw.VirtPageSize(2048))
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1,
+ 'pagesize': 2048}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_limit_cell_dict(self):
+ cell = hw.VirtNUMATopologyCellLimit(1, set([1, 2]), 512, 4, 2048)
+ cell_dict = {'cpus': '1,2', 'cpu_limit': 4,
+ 'mem': {'total': 512, 'limit': 2048},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_usage_dict(self):
+ cell = hw.VirtNUMATopologyCellUsage(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 512, 'used': 0},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_instance_topo_dict(self):
+ topo = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellInstance(2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2',
+ 'mem': {'total': 1024},
+ 'id': 1,
+ 'pagesize': None},
+ {'cpus': '3,4',
+ 'mem': {'total': 1024},
+ 'id': 2,
+ 'pagesize': None}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_numa_limits_topo_dict(self):
+ topo = hw.VirtNUMALimitTopology(
+ cells=[
+ hw.VirtNUMATopologyCellLimit(
+ 1, set([1, 2]), 1024, 4, 2048),
+ hw.VirtNUMATopologyCellLimit(
+ 2, set([3, 4]), 1024, 4, 2048)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_limit': 4,
+ 'mem': {'total': 1024, 'limit': 2048},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_limit': 4,
+ 'mem': {'total': 1024, 'limit': 2048},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_numa_topo_dict_with_usage(self):
+ topo = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_json(self):
+ expected = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ got = hw.VirtNUMAHostTopology.from_json(expected.to_json())
+
+ for exp_cell, got_cell in zip(expected.cells, got.cells):
+ self.assertNUMACellMatches(exp_cell, got_cell)
+
+
+class NumberOfSerialPortsTest(test.NoDBTestCase):
+ def test_flavor(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ num_ports = hw.get_number_of_serial_ports(flavor, None)
+ self.assertEqual(3, num_ports)
+
+ def test_image_meta(self):
+ flavor = FakeFlavorObject(8, 2048, {})
+ image_meta = {"properties": {"hw_serial_port_count": 2}}
+ num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
+ self.assertEqual(2, num_ports)
+
+ def test_flavor_invalid_value(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 'foo'})
+ image_meta = {"properties": {}}
+ self.assertRaises(exception.ImageSerialPortNumberInvalid,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+ def test_image_meta_invalid_value(self):
+ flavor = FakeFlavorObject(8, 2048, {})
+ image_meta = {"properties": {"hw_serial_port_count": 'bar'}}
+ self.assertRaises(exception.ImageSerialPortNumberInvalid,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+ def test_image_meta_smaller_than_flavor(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ image_meta = {"properties": {"hw_serial_port_count": 2}}
+ num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
+ self.assertEqual(2, num_ports)
+
+ def test_flavor_smaller_than_image_meta(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ image_meta = {"properties": {"hw_serial_port_count": 4}}
+ self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+
+class NUMATopologyClaimsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(NUMATopologyClaimsTest, self).setUp()
+
+ self.host = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2, 3, 4]), 2048,
+ cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([5, 6]), 1024)])
+
+ self.limits = hw.VirtNUMALimitTopology(
+ cells=[
+ hw.VirtNUMATopologyCellLimit(
+ 1, set([1, 2, 3, 4]), 2048,
+ cpu_limit=8, memory_limit=4096),
+ hw.VirtNUMATopologyCellLimit(
+ 2, set([5, 6]), 1024,
+ cpu_limit=4, memory_limit=2048)])
+
+ self.large_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5, 6]), 8192),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([7, 8]), 4096)])
+ self.medium_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([7, 8]), 2048)])
+ self.small_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([5]), 1024)])
+ self.no_fit_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
+ hw.VirtNUMATopologyCellInstance(3, set([3]), 256)])
+
+ def test_claim_not_enough_info(self):
+
+ # No limits supplied
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.large_instance]))
+ # Empty topology
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ hw.VirtNUMAHostTopology(), [self.large_instance],
+ limits=self.limits))
+ # No instances to claim
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(self.host, [], self.limits))
+
+ def test_claim_succeeds(self):
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.small_instance], self.limits))
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.medium_instance], self.limits))
+
+ def test_claim_fails(self):
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.large_instance], self.limits),
+ six.text_type)
+
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.medium_instance, self.small_instance],
+ self.limits),
+ six.text_type)
+
+ # Instance fails if it won't fit the topology
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.no_fit_instance], self.limits),
+ six.text_type)
+
+ # Instance fails if it won't fit the topology even with no limits
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.no_fit_instance]), six.text_type)
+
+
+class HelperMethodsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(HelperMethodsTestCase, self).setUp()
+ self.hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ self.instancetopo = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([2]), 256),
+ ])
+ self.context = context.RequestContext('fake-user',
+ 'fake-project')
+
+ def _check_usage(self, host_usage):
+ self.assertEqual(2, host_usage.cells[0].cpu_usage)
+ self.assertEqual(256, host_usage.cells[0].memory_usage)
+ self.assertEqual(1, host_usage.cells[1].cpu_usage)
+ self.assertEqual(256, host_usage.cells[1].memory_usage)
+
+ def test_dicts_json(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_dicts_instance_json(self):
+ host = {'numa_topology': self.hosttopo}
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, hw.VirtNUMAHostTopology)
+ self._check_usage(res)
+
+ def test_dicts_host_json(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_object_host_instance_json(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_object_host_instance(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_instance_with_fetch(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = {'uuid': fake_uuid}
+
+ with mock.patch.object(objects.InstanceNUMATopology,
+ 'get_by_instance_uuid', return_value=None) as get_mock:
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self.assertTrue(get_mock.called)
+
+ def test_object_instance_with_load(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = objects.Instance(context=self.context, uuid=fake_uuid)
+
+ with mock.patch.object(objects.InstanceNUMATopology,
+ 'get_by_instance_uuid', return_value=None) as get_mock:
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self.assertTrue(get_mock.called)
+
+ def test_instance_serialized_by_build_request_spec(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
+ numa_topology=objects.InstanceNUMATopology.obj_from_topology(
+ self.instancetopo))
+ # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
+ # We can remove this test once we no longer use that method.
+ instance_raw = jsonutils.to_primitive(
+ base_obj.obj_to_primitive(instance))
+ res = hw.get_host_numa_usage_from_instance(host, instance_raw)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_attr_host(self):
+ class Host(object):
+ def __init__(obj):
+ obj.numa_topology = self.hosttopo.to_json()
+
+ host = Host()
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_never_serialize_result(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance,
+ never_serialize_result=True)
+ self.assertIsInstance(res, hw.VirtNUMAHostTopology)
+ self._check_usage(res)
+
+
+class VirtMemoryPagesTestCase(test.NoDBTestCase):
+ def test_virt_pages_topology(self):
+ pages = hw.VirtPagesTopology(4, 1024, 512)
+ self.assertEqual(4, pages.size_kb)
+ self.assertEqual(1024, pages.total)
+ self.assertEqual(512, pages.used)
+
+ def test_virt_pages_topology_to_dict(self):
+ pages = hw.VirtPagesTopology(4, 1024, 512)
+ self.assertEqual({'size_kb': 4,
+ 'total': 1024,
+ 'used': 512}, pages.to_dict())
+
+ def test_virt_pages_topology_from_dict(self):
+ pages = hw.VirtPagesTopology.from_dict({'size_kb': 4,
+ 'total': 1024,
+ 'used': 512})
+ self.assertEqual(4, pages.size_kb)
+ self.assertEqual(1024, pages.total)
+ self.assertEqual(512, pages.used)
+
+ def test_cell_instance_pagesize(self):
+ pagesize = hw.VirtPageSize(2048)
+ cell = hw.VirtNUMATopologyCellInstance(
+ 0, set([0]), 1024, pagesize)
+
+ self.assertEqual(0, cell.id)
+ self.assertEqual(set([0]), cell.cpuset)
+ self.assertEqual(1024, cell.memory)
+ self.assertEqual(2048, cell.pagesize.size_kb)
diff --git a/nova/tests/unit/virt/test_imagecache.py b/nova/tests/unit/virt/test_imagecache.py
new file mode 100644
index 0000000000..dc587fb4bc
--- /dev/null
+++ b/nova/tests/unit/virt/test_imagecache.py
@@ -0,0 +1,122 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.compute import vm_states
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt import imagecache
+
+CONF = cfg.CONF
+
+
+class ImageCacheManagerTests(test.NoDBTestCase):
+
+    def test_configuration_defaults(self):
+ self.assertEqual(2400, CONF.image_cache_manager_interval)
+ self.assertEqual('_base', CONF.image_cache_subdirectory_name)
+ self.assertTrue(CONF.remove_unused_base_images)
+ self.assertEqual(24 * 3600,
+ CONF.remove_unused_original_minimum_age_seconds)
+
+ def test_cache_manager(self):
+ cache_manager = imagecache.ImageCacheManager()
+ self.assertTrue(cache_manager.remove_unused_base_images)
+ self.assertRaises(NotImplementedError,
+ cache_manager.update, None, [])
+ self.assertRaises(NotImplementedError,
+ cache_manager._get_base)
+ base_images = cache_manager._list_base_images(None)
+ self.assertEqual([], base_images['unexplained_images'])
+ self.assertEqual([], base_images['originals'])
+ self.assertRaises(NotImplementedError,
+ cache_manager._age_and_verify_cached_images,
+ None, [], None)
+
+ def test_list_running_instances(self):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'id': '2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
+ 'host': 'remotehost',
+ 'id': '3',
+ 'uuid': '789',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+
+ image_cache_manager = imagecache.ImageCacheManager()
+
+ # The argument here should be a context, but it's mocked out
+ running = image_cache_manager._list_running_instances(None,
+ all_instances)
+
+ self.assertEqual(4, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual((1, 1, ['instance-00000002',
+ 'instance-00000003']),
+ running['used_images']['2'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['21'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['22'])
+
+ self.assertIn('instance-00000001', running['instance_names'])
+ self.assertIn('123', running['instance_names'])
+
+ self.assertEqual(4, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
+ self.assertEqual(2, running['image_popularity']['2'])
+ self.assertEqual(1, running['image_popularity']['21'])
+ self.assertEqual(1, running['image_popularity']['22'])
+
+ def test_list_resizing_instances(self):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': vm_states.RESIZED,
+ 'task_state': None}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ running = image_cache_manager._list_running_instances(None,
+ all_instances)
+
+ self.assertEqual(1, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual(set(['instance-00000001', '123',
+ 'instance-00000001_resize', '123_resize']),
+ running['instance_names'])
+
+ self.assertEqual(1, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
new file mode 100644
index 0000000000..be5ea73ef1
--- /dev/null
+++ b/nova/tests/unit/virt/test_images.py
@@ -0,0 +1,45 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+from oslo.concurrency import processutils
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova.virt import images
+
+
+class QemuTestCase(test.NoDBTestCase):
+ def test_qemu_info_with_bad_path(self):
+ self.assertRaises(exception.InvalidDiskInfo,
+ images.qemu_img_info,
+ '/path/that/does/not/exist')
+
+ @mock.patch.object(os.path, 'exists', return_value=True)
+ def test_qemu_info_with_errors(self, path_exists):
+ self.assertRaises(processutils.ProcessExecutionError,
+ images.qemu_img_info,
+ '/fake/path')
+
+ @mock.patch.object(os.path, 'exists', return_value=True)
+ @mock.patch.object(utils, 'execute',
+ return_value=('stdout', None))
+ def test_qemu_info_with_no_errors(self, path_exists,
+ utils_execute):
+ image_info = images.qemu_img_info('/fake/path')
+ self.assertTrue(image_info)
+ self.assertTrue(str(image_info)) \ No newline at end of file
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
new file mode 100644
index 0000000000..67b0ac503a
--- /dev/null
+++ b/nova/tests/unit/virt/test_virt.py
@@ -0,0 +1,287 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import os
+
+import mock
+
+from nova import test
+from nova import utils
+from nova.virt.disk import api as disk_api
+from nova.virt.disk.mount import api as mount
+from nova.virt import driver
+
+PROC_MOUNTS_CONTENTS = """rootfs / rootfs rw 0 0
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,relatime,size=1013160k,nr_inodes=253290,mode=755 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620 0 0
+tmpfs /run tmpfs rw,nosuid,relatime,size=408904k,mode=755 0 0"""
+
+
+class TestVirtDriver(test.NoDBTestCase):
+ def test_block_device(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'num': 0,
+ 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/sdc1',
+ 'size': 1}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ empty_block_device_info = {}
+
+ self.assertEqual(
+ driver.block_device_info_get_root(block_device_info), '/dev/sda')
+ self.assertIsNone(
+ driver.block_device_info_get_root(empty_block_device_info))
+ self.assertIsNone(driver.block_device_info_get_root(None))
+
+ self.assertEqual(
+ driver.block_device_info_get_swap(block_device_info), swap)
+ self.assertIsNone(driver.block_device_info_get_swap(
+ empty_block_device_info)['device_name'])
+ self.assertEqual(driver.block_device_info_get_swap(
+ empty_block_device_info)['swap_size'], 0)
+ self.assertIsNone(
+ driver.block_device_info_get_swap({'swap': None})['device_name'])
+ self.assertEqual(
+ driver.block_device_info_get_swap({'swap': None})['swap_size'],
+ 0)
+ self.assertIsNone(
+ driver.block_device_info_get_swap(None)['device_name'])
+ self.assertEqual(
+ driver.block_device_info_get_swap(None)['swap_size'], 0)
+
+ self.assertEqual(
+ driver.block_device_info_get_ephemerals(block_device_info),
+ ephemerals)
+ self.assertEqual(
+ driver.block_device_info_get_ephemerals(empty_block_device_info),
+ [])
+ self.assertEqual(
+ driver.block_device_info_get_ephemerals(None),
+ [])
+
+ def test_swap_is_usable(self):
+ self.assertFalse(driver.swap_is_usable(None))
+ self.assertFalse(driver.swap_is_usable({'device_name': None}))
+ self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
+ 'swap_size': 0}))
+ self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
+ 'swap_size': 1}))
+
+
+class FakeMount(object):
+ def __init__(self, image, mount_dir, partition=None, device=None):
+ self.image = image
+ self.partition = partition
+ self.mount_dir = mount_dir
+
+ self.linked = self.mapped = self.mounted = False
+ self.device = device
+
+ def do_mount(self):
+ self.linked = True
+ self.mapped = True
+ self.mounted = True
+ self.device = '/dev/fake'
+ return True
+
+ def do_umount(self):
+ self.linked = True
+ self.mounted = False
+
+ def do_teardown(self):
+ self.linked = False
+ self.mapped = False
+ self.mounted = False
+ self.device = None
+
+
+class TestDiskImage(test.NoDBTestCase):
+ def mock_proc_mounts(self, mock_open):
+ response = io.StringIO(unicode(PROC_MOUNTS_CONTENTS))
+ mock_open.return_value = response
+
+ @mock.patch('__builtin__.open')
+ def test_mount(self, mock_open):
+ self.mock_proc_mounts(mock_open)
+ image = '/tmp/fake-image'
+ mountdir = '/mnt/fake_rootfs'
+ fakemount = FakeMount(image, mountdir, None)
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return fakemount
+
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+ diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
+ dev = diskimage.mount()
+ self.assertEqual(diskimage._mounter, fakemount)
+ self.assertEqual(dev, '/dev/fake')
+
+ @mock.patch('__builtin__.open')
+ def test_umount(self, mock_open):
+ self.mock_proc_mounts(mock_open)
+
+ image = '/tmp/fake-image'
+ mountdir = '/mnt/fake_rootfs'
+ fakemount = FakeMount(image, mountdir, None)
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return fakemount
+
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+ diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
+ dev = diskimage.mount()
+ self.assertEqual(diskimage._mounter, fakemount)
+ self.assertEqual(dev, '/dev/fake')
+ diskimage.umount()
+ self.assertIsNone(diskimage._mounter)
+
+ @mock.patch('__builtin__.open')
+ def test_teardown(self, mock_open):
+ self.mock_proc_mounts(mock_open)
+
+ image = '/tmp/fake-image'
+ mountdir = '/mnt/fake_rootfs'
+ fakemount = FakeMount(image, mountdir, None)
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return fakemount
+
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+ diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
+ dev = diskimage.mount()
+ self.assertEqual(diskimage._mounter, fakemount)
+ self.assertEqual(dev, '/dev/fake')
+ diskimage.teardown()
+ self.assertIsNone(diskimage._mounter)
+
+
+class TestVirtDisk(test.NoDBTestCase):
+ def setUp(self):
+ super(TestVirtDisk, self).setUp()
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def test_lxc_setup_container(self):
+ image = '/tmp/fake-image'
+ container_dir = '/mnt/fake_rootfs/'
+
+ def proc_mounts(self, mount_point):
+ return None
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return FakeMount(imgfile, mountdir, partition)
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+
+ self.assertEqual(disk_api.setup_container(image, container_dir),
+ '/dev/fake')
+
+ def test_lxc_teardown_container(self):
+
+ def proc_mounts(self, mount_point):
+ mount_points = {
+ '/mnt/loop/nopart': '/dev/loop0',
+ '/mnt/loop/part': '/dev/mapper/loop0p1',
+ '/mnt/nbd/nopart': '/dev/nbd15',
+ '/mnt/nbd/part': '/dev/mapper/nbd15p1',
+ }
+ return mount_points[mount_point]
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
+ expected_commands = []
+
+ disk_api.teardown_container('/mnt/loop/nopart')
+ expected_commands += [
+ ('umount', '/dev/loop0'),
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/loop/part')
+ expected_commands += [
+ ('umount', '/dev/mapper/loop0p1'),
+ ('kpartx', '-d', '/dev/loop0'),
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/nopart')
+ expected_commands += [
+ ('blockdev', '--flushbufs', '/dev/nbd15'),
+ ('umount', '/dev/nbd15'),
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/part')
+ expected_commands += [
+ ('blockdev', '--flushbufs', '/dev/nbd15'),
+ ('umount', '/dev/mapper/nbd15p1'),
+ ('kpartx', '-d', '/dev/nbd15'),
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_lxc_teardown_container_with_namespace_cleaned(self):
+
+ def proc_mounts(self, mount_point):
+ return None
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
+ expected_commands = []
+
+ disk_api.teardown_container('/mnt/loop/nopart', '/dev/loop0')
+ expected_commands += [
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/loop/part', '/dev/loop0')
+ expected_commands += [
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/nopart', '/dev/nbd15')
+ expected_commands += [
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/part', '/dev/nbd15')
+ expected_commands += [
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ self.assertEqual(self.executes, expected_commands)
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
new file mode 100644
index 0000000000..48c009fd42
--- /dev/null
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -0,0 +1,881 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import sys
+import traceback
+
+import fixtures
+import mock
+import netaddr
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+import six
+
+from nova.compute import manager
+from nova.console import type as ctype
+from nova import exception
+from nova import objects
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import utils as test_utils
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.virt import block_device as driver_block_device
+from nova.virt import event as virtevent
+from nova.virt import fake
+from nova.virt import libvirt
+from nova.virt.libvirt import imagebackend
+
+LOG = logging.getLogger(__name__)
+
+
+def catch_notimplementederror(f):
+ """Decorator to simplify catching drivers raising NotImplementedError
+
+ If a particular call makes a driver raise NotImplementedError, we
+ log it so that we can extract this information afterwards as needed.
+ """
+ def wrapped_func(self, *args, **kwargs):
+ try:
+ return f(self, *args, **kwargs)
+ except NotImplementedError:
+ frame = traceback.extract_tb(sys.exc_info()[2])[-1]
+ LOG.error("%(driver)s does not implement %(method)s "
+ "required for test %(test)s" %
+ {'driver': type(self.connection),
+ 'method': frame[2], 'test': f.__name__})
+
+ wrapped_func.__name__ = f.__name__
+ wrapped_func.__doc__ = f.__doc__
+ return wrapped_func
+
+
+class _FakeDriverBackendTestCase(object):
+ def _setup_fakelibvirt(self):
+ # So that the _supports_direct_io does the test based
+ # on the current working directory, instead of the
+ # default instances_path which doesn't exist
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+
+ # Put fakelibvirt in place
+ if 'libvirt' in sys.modules:
+ self.saved_libvirt = sys.modules['libvirt']
+ else:
+ self.saved_libvirt = None
+
+ import nova.tests.unit.virt.libvirt.fake_imagebackend as \
+ fake_imagebackend
+ import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \
+ fake_libvirt_utils
+ import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt
+
+ sys.modules['libvirt'] = fakelibvirt
+ import nova.virt.libvirt.driver
+ import nova.virt.libvirt.firewall
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.imagebackend',
+ fake_imagebackend))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt',
+ fakelibvirt))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.firewall.libvirt',
+ fakelibvirt))
+
+ self.flags(rescue_image_id="2",
+ rescue_kernel_id="3",
+ rescue_ramdisk_id=None,
+ snapshots_directory='./',
+ group='libvirt')
+
+ def fake_extend(image, size):
+ pass
+
+ def fake_migrateToURI(*a):
+ pass
+
+ def fake_make_drive(_self, _path):
+ pass
+
+ def fake_get_instance_disk_info(_self, instance, xml=None,
+ block_device_info=None):
+ return '[]'
+
+ def fake_delete_instance_files(_self, _instance):
+ pass
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ '_get_instance_disk_info',
+ fake_get_instance_disk_info)
+
+ self.stubs.Set(nova.virt.libvirt.driver.disk,
+ 'extend', fake_extend)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ '_delete_instance_files',
+ fake_delete_instance_files)
+
+ # Like the existing fakelibvirt.migrateToURI, do nothing,
+ # but don't fail for these tests.
+ self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
+ 'migrateToURI', fake_migrateToURI)
+
+ # We can't actually make a config drive v2 because ensure_tree has
+ # been faked out
+ self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
+ 'make_drive', fake_make_drive)
+
+ def _teardown_fakelibvirt(self):
+ # Restore libvirt
+ if self.saved_libvirt:
+ sys.modules['libvirt'] = self.saved_libvirt
+
+ def setUp(self):
+ super(_FakeDriverBackendTestCase, self).setUp()
+ # TODO(sdague): it would be nice to do this in a way that only
+ # the relevant backends where replaced for tests, though this
+ # should not harm anything by doing it for all backends
+ fake_image.stub_out_image_service(self.stubs)
+ self._setup_fakelibvirt()
+
+ def tearDown(self):
+ fake_image.FakeImageService_reset()
+ self._teardown_fakelibvirt()
+ super(_FakeDriverBackendTestCase, self).tearDown()
+
+
+class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
+ """Test that ComputeManager can successfully load both
+ old style and new style drivers and end up with the correct
+ final class.
+ """
+
+ # if your driver supports being tested in a fake way, it can go here
+ #
+ # both long form and short form drivers are supported
+ new_drivers = {
+ 'nova.virt.fake.FakeDriver': 'FakeDriver',
+ 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
+ 'fake.FakeDriver': 'FakeDriver',
+ 'libvirt.LibvirtDriver': 'LibvirtDriver'
+ }
+
+ def test_load_new_drivers(self):
+ for cls, driver in self.new_drivers.iteritems():
+ self.flags(compute_driver=cls)
+ # NOTE(sdague) the try block is to make it easier to debug a
+ # failure by knowing which driver broke
+ try:
+ cm = manager.ComputeManager()
+ except Exception as e:
+ self.fail("Couldn't load driver %s - %s" % (cls, e))
+
+ self.assertEqual(cm.driver.__class__.__name__, driver,
+                             "Couldn't load driver %s" % cls)
+
+ def test_fail_to_load_new_drivers(self):
+ self.flags(compute_driver='nova.virt.amiga')
+
+ def _fake_exit(error):
+ raise test.TestingException()
+
+ self.stubs.Set(sys, 'exit', _fake_exit)
+ self.assertRaises(test.TestingException, manager.ComputeManager)
+
+
+class _VirtDriverTestCase(_FakeDriverBackendTestCase):
+ def setUp(self):
+ super(_VirtDriverTestCase, self).setUp()
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+ self.connection = importutils.import_object(self.driver_module,
+ fake.FakeVirtAPI())
+ self.ctxt = test_utils.get_test_admin_context()
+ self.image_service = fake_image.FakeImageService()
+ # NOTE(dripton): resolve_driver_format does some file reading and
+ # writing and chowning that complicate testing too much by requiring
+ # using real directories with proper permissions. Just stub it out
+ # here; we test it in test_imagebackend.py
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
+
+ def _get_running_instance(self, obj=True):
+ instance_ref = test_utils.get_test_instance(obj=obj)
+ network_info = test_utils.get_test_network_info()
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ image_info = test_utils.get_test_image_info(None, instance_ref)
+ self.connection.spawn(self.ctxt, instance_ref, image_info,
+ [], 'herp', network_info=network_info)
+ return instance_ref, network_info
+
+ @catch_notimplementederror
+ def test_init_host(self):
+ self.connection.init_host('myhostname')
+
+ @catch_notimplementederror
+ def test_list_instances(self):
+ self.connection.list_instances()
+
+ @catch_notimplementederror
+ def test_list_instance_uuids(self):
+ self.connection.list_instance_uuids()
+
+ @catch_notimplementederror
+ def test_spawn(self):
+ instance_ref, network_info = self._get_running_instance()
+ domains = self.connection.list_instances()
+ self.assertIn(instance_ref['name'], domains)
+
+ num_instances = self.connection.get_num_instances()
+ self.assertEqual(1, num_instances)
+
+ @catch_notimplementederror
+ def test_snapshot_not_running(self):
+ instance_ref = test_utils.get_test_instance()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.assertRaises(exception.InstanceNotRunning,
+ self.connection.snapshot,
+ self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
+
+ @catch_notimplementederror
+ def test_snapshot_running(self):
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
+
+ @catch_notimplementederror
+ def test_post_interrupted_snapshot_cleanup(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
+ instance_ref)
+
+ @catch_notimplementederror
+ def test_reboot(self):
+ reboot_type = "SOFT"
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.reboot(self.ctxt, instance_ref, network_info,
+ reboot_type)
+
+ @catch_notimplementederror
+ def test_get_host_ip_addr(self):
+ host_ip = self.connection.get_host_ip_addr()
+
+ # Will raise an exception if it's not a valid IP at all
+ ip = netaddr.IPAddress(host_ip)
+
+ # For now, assume IPv4.
+ self.assertEqual(ip.version, 4)
+
+ @catch_notimplementederror
+ def test_set_admin_password(self):
+ instance, network_info = self._get_running_instance(obj=True)
+ self.connection.set_admin_password(instance, 'p4ssw0rd')
+
+ @catch_notimplementederror
+ def test_inject_file(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.inject_file(instance_ref,
+ base64.b64encode('/testfile'),
+ base64.b64encode('testcontents'))
+
+ @catch_notimplementederror
+ def test_resume_state_on_host_boot(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_rescue(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
+
+ @catch_notimplementederror
+ def test_unrescue_unrescued_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.unrescue(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_rescued_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
+ self.connection.unrescue(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_poll_rebooting_instances(self):
+ instances = [self._get_running_instance()]
+ self.connection.poll_rebooting_instances(10, instances)
+
+ @catch_notimplementederror
+ def test_migrate_disk_and_power_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ flavor_ref = test_utils.get_test_flavor()
+ self.connection.migrate_disk_and_power_off(
+ self.ctxt, instance_ref, 'dest_host', flavor_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_power_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_off(instance_ref)
+
+ @catch_notimplementederror
+ def test_power_on_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_on(self.ctxt, instance_ref,
+ network_info, None)
+
+ @catch_notimplementederror
+ def test_power_on_powered_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_off(instance_ref)
+ self.connection.power_on(self.ctxt, instance_ref, network_info, None)
+
+ @catch_notimplementederror
+ def test_soft_delete(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ self.connection.soft_delete(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_soft_deleted(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_pause(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.pause(instance_ref)
+
+ @catch_notimplementederror
+ def test_unpause_unpaused_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.unpause(instance_ref)
+
+ @catch_notimplementederror
+ def test_unpause_paused_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.pause(instance_ref)
+ self.connection.unpause(instance_ref)
+
+ @catch_notimplementederror
+ def test_suspend(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.suspend(instance_ref)
+
+ @catch_notimplementederror
+ def test_resume_unsuspended_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.resume(self.ctxt, instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_resume_suspended_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.suspend(instance_ref)
+ self.connection.resume(self.ctxt, instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance_nonexistent(self):
+ fake_instance = {'id': 42, 'name': 'I just made this up!',
+ 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
+ network_info = test_utils.get_test_network_info()
+ self.connection.destroy(self.ctxt, fake_instance, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.assertIn(instance_ref['name'],
+ self.connection.list_instances())
+ self.connection.destroy(self.ctxt, instance_ref, network_info)
+ self.assertNotIn(instance_ref['name'],
+ self.connection.list_instances())
+
+ @catch_notimplementederror
+ def test_get_volume_connector(self):
+ result = self.connection.get_volume_connector({'id': 'fake'})
+ self.assertIn('ip', result)
+ self.assertIn('initiator', result)
+ self.assertIn('host', result)
+
+ @catch_notimplementederror
+ def test_attach_detach_volume(self):
+ instance_ref, network_info = self._get_running_instance()
+ connection_info = {
+ "driver_volume_type": "fake",
+ "serial": "fake_serial",
+ "data": {}
+ }
+ self.assertIsNone(
+ self.connection.attach_volume(None, connection_info, instance_ref,
+ '/dev/sda'))
+ self.assertIsNone(
+ self.connection.detach_volume(connection_info, instance_ref,
+ '/dev/sda'))
+
+ @catch_notimplementederror
+ def test_swap_volume(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.assertIsNone(
+ self.connection.attach_volume(None, {'driver_volume_type': 'fake',
+ 'data': {}},
+ instance_ref,
+ '/dev/sda'))
+ self.assertIsNone(
+ self.connection.swap_volume({'driver_volume_type': 'fake',
+ 'data': {}},
+ {'driver_volume_type': 'fake',
+ 'data': {}},
+ instance_ref,
+ '/dev/sda', 2))
+
+ @catch_notimplementederror
+ def test_attach_detach_different_power_states(self):
+ instance_ref, network_info = self._get_running_instance()
+ connection_info = {
+ "driver_volume_type": "fake",
+ "serial": "fake_serial",
+ "data": {}
+ }
+ self.connection.power_off(instance_ref)
+ self.connection.attach_volume(None, connection_info, instance_ref,
+ '/dev/sda')
+
+ bdm = {
+ 'root_device_name': None,
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': instance_ref['uuid'],
+ 'device_name': '/dev/sda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': False,
+ 'snapshot_id': None,
+ 'volume_id': 'abcdedf',
+ 'volume_size': None,
+ 'no_device': None
+ }),
+ ])
+ }
+ bdm['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'fake', 'data': {}})
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ self.connection.power_on(
+ self.ctxt, instance_ref, network_info, bdm)
+ self.connection.detach_volume(connection_info,
+ instance_ref,
+ '/dev/sda')
+
+    @catch_notimplementederror
+    def test_get_info(self):
+        # get_info() must expose the canonical instance-state fields.
+        instance_ref, network_info = self._get_running_instance()
+        info = self.connection.get_info(instance_ref)
+        self.assertIn('state', info)
+        self.assertIn('max_mem', info)
+        self.assertIn('mem', info)
+        self.assertIn('num_cpu', info)
+        self.assertIn('cpu_time', info)
+
+    @catch_notimplementederror
+    def test_get_info_for_unknown_instance(self):
+        # An unknown instance name must surface as NotFound, not a
+        # driver-specific error.
+        self.assertRaises(exception.NotFound,
+                          self.connection.get_info,
+                          {'name': 'I just made this name up'})
+
+    @catch_notimplementederror
+    def test_get_diagnostics(self):
+        # Smoke test: the call must not raise for a running instance.
+        instance_ref, network_info = self._get_running_instance(obj=True)
+        self.connection.get_diagnostics(instance_ref)
+
+    @catch_notimplementederror
+    def test_get_instance_diagnostics(self):
+        instance_ref, network_info = self._get_running_instance(obj=True)
+        # launched_at is required by the standardized diagnostics object.
+        instance_ref['launched_at'] = timeutils.utcnow()
+        self.connection.get_instance_diagnostics(instance_ref)
+
+    @catch_notimplementederror
+    def test_block_stats(self):
+        # Drivers return a fixed 5-tuple of block I/O counters.
+        instance_ref, network_info = self._get_running_instance()
+        stats = self.connection.block_stats(instance_ref['name'], 'someid')
+        self.assertEqual(len(stats), 5)
+
+    @catch_notimplementederror
+    def test_interface_stats(self):
+        # Drivers return a fixed 8-tuple of NIC rx/tx counters.
+        instance_ref, network_info = self._get_running_instance()
+        stats = self.connection.interface_stats(instance_ref['name'], 'someid')
+        self.assertEqual(len(stats), 8)
+
+    @catch_notimplementederror
+    def test_get_console_output(self):
+        # Seed the fake console log so drivers backed by fake_libvirt_utils
+        # have something to read.
+        fake_libvirt_utils.files['dummy.log'] = ''
+        instance_ref, network_info = self._get_running_instance()
+        console_output = self.connection.get_console_output(self.ctxt,
+            instance_ref)
+        self.assertIsInstance(console_output, six.string_types)
+
+    @catch_notimplementederror
+    def test_get_vnc_console(self):
+        # Each console getter must return the matching console type object.
+        instance, network_info = self._get_running_instance(obj=True)
+        vnc_console = self.connection.get_vnc_console(self.ctxt, instance)
+        self.assertIsInstance(vnc_console, ctype.ConsoleVNC)
+
+    @catch_notimplementederror
+    def test_get_spice_console(self):
+        instance_ref, network_info = self._get_running_instance()
+        spice_console = self.connection.get_spice_console(self.ctxt,
+                                                          instance_ref)
+        self.assertIsInstance(spice_console, ctype.ConsoleSpice)
+
+    @catch_notimplementederror
+    def test_get_rdp_console(self):
+        instance_ref, network_info = self._get_running_instance()
+        rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref)
+        self.assertIsInstance(rdp_console, ctype.ConsoleRDP)
+
+    @catch_notimplementederror
+    def test_get_serial_console(self):
+        instance_ref, network_info = self._get_running_instance()
+        serial_console = self.connection.get_serial_console(self.ctxt,
+                                                            instance_ref)
+        self.assertIsInstance(serial_console, ctype.ConsoleSerial)
+
+    @catch_notimplementederror
+    def test_get_console_pool_info(self):
+        # Pool info is a dict carrying connection credentials.
+        instance_ref, network_info = self._get_running_instance()
+        console_pool = self.connection.get_console_pool_info(instance_ref)
+        self.assertIn('address', console_pool)
+        self.assertIn('username', console_pool)
+        self.assertIn('password', console_pool)
+
+    @catch_notimplementederror
+    def test_refresh_security_group_rules(self):
+        # FIXME: Create security group and add the instance to it
+        # Currently only a smoke test with a dummy group id.
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.refresh_security_group_rules(1)
+
+    @catch_notimplementederror
+    def test_refresh_security_group_members(self):
+        # FIXME: Create security group and add the instance to it
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.refresh_security_group_members(1)
+
+    @catch_notimplementederror
+    def test_refresh_instance_security_rules(self):
+        # FIXME: Create security group and add the instance to it
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.refresh_instance_security_rules(instance_ref)
+
+    @catch_notimplementederror
+    def test_refresh_provider_fw_rules(self):
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.refresh_provider_fw_rules()
+
+    @catch_notimplementederror
+    def test_ensure_filtering_for_instance(self):
+        # Filtering rules are set up before live migration; must not raise.
+        instance = test_utils.get_test_instance(obj=True)
+        network_info = test_utils.get_test_network_info()
+        self.connection.ensure_filtering_rules_for_instance(instance,
+                                                            network_info)
+
+    @catch_notimplementederror
+    def test_unfilter_instance(self):
+        instance_ref = test_utils.get_test_instance()
+        network_info = test_utils.get_test_network_info()
+        self.connection.unfilter_instance(instance_ref, network_info)
+
+    @catch_notimplementederror
+    def test_live_migration(self):
+        # Post/recover callbacks are no-op lambdas; only the call path is
+        # exercised here.
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
+                                       lambda *a: None, lambda *a: None)
+
+    @catch_notimplementederror
+    def _check_available_resource_fields(self, host_status):
+        # Shared assertion helper: every driver's get_available_resource()
+        # dict must carry these scheduler-consumed keys.
+        keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
+                'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
+                'hypervisor_hostname', 'cpu_info', 'disk_available_least',
+                'supported_instances']
+        for key in keys:
+            self.assertIn(key, host_status)
+        self.assertIsInstance(host_status['hypervisor_version'], int)
+
+    @catch_notimplementederror
+    def test_get_available_resource(self):
+        available_resource = self.connection.get_available_resource(
+            'myhostname')
+        self._check_available_resource_fields(available_resource)
+
+    @catch_notimplementederror
+    def test_get_available_nodes(self):
+        # refresh=False: cached node list is acceptable.
+        self.connection.get_available_nodes(False)
+
+    @catch_notimplementederror
+    def _check_host_cpu_status_fields(self, host_cpu_status):
+        # Shared assertion helper for get_host_cpu_stats() results.
+        self.assertIn('kernel', host_cpu_status)
+        self.assertIn('idle', host_cpu_status)
+        self.assertIn('user', host_cpu_status)
+        self.assertIn('iowait', host_cpu_status)
+        self.assertIn('frequency', host_cpu_status)
+
+    @catch_notimplementederror
+    def test_get_host_cpu_stats(self):
+        host_cpu_status = self.connection.get_host_cpu_stats()
+        self._check_host_cpu_status_fields(host_cpu_status)
+
+    @catch_notimplementederror
+    def test_set_host_enabled(self):
+        # The first argument is the legacy host parameter, unused by most
+        # drivers — hence the placeholder string.
+        self.connection.set_host_enabled('a useless argument?', True)
+
+    @catch_notimplementederror
+    def test_get_host_uptime(self):
+        self.connection.get_host_uptime('a useless argument?')
+
+    @catch_notimplementederror
+    def test_host_power_action_reboot(self):
+        self.connection.host_power_action('a useless argument?', 'reboot')
+
+    @catch_notimplementederror
+    def test_host_power_action_shutdown(self):
+        self.connection.host_power_action('a useless argument?', 'shutdown')
+
+    @catch_notimplementederror
+    def test_host_power_action_startup(self):
+        self.connection.host_power_action('a useless argument?', 'startup')
+
+    @catch_notimplementederror
+    def test_add_to_aggregate(self):
+        self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
+
+    @catch_notimplementederror
+    def test_remove_from_aggregate(self):
+        self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
+
+    def test_events(self):
+        # Events emitted through the driver must reach a registered
+        # listener in emission order.
+        got_events = []
+
+        def handler(event):
+            got_events.append(event)
+
+        self.connection.register_event_listener(handler)
+
+        event1 = virtevent.LifecycleEvent(
+            "cef19ce0-0ca2-11df-855d-b19fbce37686",
+            virtevent.EVENT_LIFECYCLE_STARTED)
+        event2 = virtevent.LifecycleEvent(
+            "cef19ce0-0ca2-11df-855d-b19fbce37686",
+            virtevent.EVENT_LIFECYCLE_PAUSED)
+
+        self.connection.emit_event(event1)
+        self.connection.emit_event(event2)
+        want_events = [event1, event2]
+        self.assertEqual(want_events, got_events)
+
+        event3 = virtevent.LifecycleEvent(
+            "cef19ce0-0ca2-11df-855d-b19fbce37686",
+            virtevent.EVENT_LIFECYCLE_RESUMED)
+        event4 = virtevent.LifecycleEvent(
+            "cef19ce0-0ca2-11df-855d-b19fbce37686",
+            virtevent.EVENT_LIFECYCLE_STOPPED)
+
+        self.connection.emit_event(event3)
+        self.connection.emit_event(event4)
+
+        # Later events append; earlier ones are retained.
+        want_events = [event1, event2, event3, event4]
+        self.assertEqual(want_events, got_events)
+
+    def test_event_bad_object(self):
+        # Passing in something which does not inherit
+        # from virtevent.Event
+
+        def handler(event):
+            pass
+
+        self.connection.register_event_listener(handler)
+
+        badevent = {
+            "foo": "bar"
+        }
+
+        # emit_event must validate the event type up front.
+        self.assertRaises(ValueError,
+                          self.connection.emit_event,
+                          badevent)
+
+    def test_event_bad_callback(self):
+        # Check that if a callback raises an exception,
+        # it does not propagate back out of the
+        # 'emit_event' call
+
+        def handler(event):
+            raise Exception("Hit Me!")
+
+        self.connection.register_event_listener(handler)
+
+        event1 = virtevent.LifecycleEvent(
+            "cef19ce0-0ca2-11df-855d-b19fbce37686",
+            virtevent.EVENT_LIFECYCLE_STARTED)
+
+        # Must not raise despite the handler blowing up.
+        self.connection.emit_event(event1)
+
+    def test_set_bootable(self):
+        # set_bootable is optional; the base contract is NotImplementedError.
+        self.assertRaises(NotImplementedError, self.connection.set_bootable,
+                          'instance', True)
+
+    @catch_notimplementederror
+    def test_get_instance_disk_info(self):
+        # This should be implemented by any driver that supports live migrate.
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.get_instance_disk_info(instance_ref['name'],
+                                               block_device_info={})
+
+
+class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
+    # Runs the shared driver suite against the abstract base driver: every
+    # test should be skipped via catch_notimplementederror.
+    def setUp(self):
+        self.driver_module = "nova.virt.driver.ComputeDriver"
+        super(AbstractDriverTestCase, self).setUp()
+
+
+class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
+    # Runs the shared driver suite against the in-memory FakeDriver.
+    def setUp(self):
+        self.driver_module = 'nova.virt.fake.FakeDriver'
+        # Register the hostname the tests query resources for.
+        fake.set_nodes(['myhostname'])
+        super(FakeConnectionTestCase, self).setUp()
+
+    def _check_available_resource_fields(self, host_status):
+        super(FakeConnectionTestCase, self)._check_available_resource_fields(
+            host_status)
+
+        hypervisor_type = host_status['hypervisor_type']
+        supported_instances = host_status['supported_instances']
+        try:
+            # supported_instances could be JSON wrapped
+            supported_instances = jsonutils.loads(supported_instances)
+        except TypeError:
+            # Already a native list; nothing to decode.
+            pass
+        # The advertised hypervisor type must appear in at least one
+        # supported-instance tuple.
+        self.assertTrue(any(hypervisor_type in x for x in supported_instances))
+
+
+class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
+    # Runs the shared driver suite against the (fakelibvirt-backed)
+    # libvirt driver, plus libvirt-specific _set_host_enabled tests.
+
+    REQUIRES_LOCKING = True
+
+    def setUp(self):
+        # Point _VirtDriverTestCase at the right module
+        self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
+        super(LibvirtConnTestCase, self).setUp()
+        # Neutralize host-enable side effects for the generic suite; the
+        # dedicated tests below unstub and exercise the real method.
+        self.stubs.Set(self.connection,
+                       '_set_host_enabled', mock.MagicMock())
+        self.useFixture(fixtures.MonkeyPatch(
+            'nova.context.get_admin_context',
+            self._fake_admin_context))
+
+    def _fake_admin_context(self, *args, **kwargs):
+        # Reuse the test context wherever an admin context is requested.
+        return self.ctxt
+
+    def test_force_hard_reboot(self):
+        # Zero soft-reboot wait forces the hard-reboot path of test_reboot.
+        self.flags(wait_soft_reboot_seconds=0, group='libvirt')
+        self.test_reboot()
+
+    def test_migrate_disk_and_power_off(self):
+        # there is lack of fake stuff to execute this method. so pass.
+        self.skipTest("Test nothing, but this method"
+                      " needed to override superclass.")
+
+    def test_internal_set_host_enabled(self):
+        # Unstub to reach the real _set_host_enabled implementation.
+        self.mox.UnsetStubs()
+        service_mock = mock.MagicMock()
+
+        # Previous status of the service: disabled: False
+        service_mock.configure_mock(disabled_reason='None',
+                                    disabled=False)
+        with mock.patch.object(objects.Service, "get_by_compute_host",
+                               return_value=service_mock):
+            self.connection._set_host_enabled(False, 'ERROR!')
+            self.assertTrue(service_mock.disabled)
+            # Auto-disable prefixes the reason with 'AUTO: '.
+            self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')
+
+    def test_set_host_enabled_when_auto_disabled(self):
+        self.mox.UnsetStubs()
+        service_mock = mock.MagicMock()
+
+        # Previous status of the service: disabled: True, 'AUTO: ERROR'
+        service_mock.configure_mock(disabled_reason='AUTO: ERROR',
+                                    disabled=True)
+        with mock.patch.object(objects.Service, "get_by_compute_host",
+                               return_value=service_mock):
+            # Re-enabling clears an AUTO disable.
+            self.connection._set_host_enabled(True)
+            self.assertFalse(service_mock.disabled)
+            self.assertEqual(service_mock.disabled_reason, 'None')
+
+    def test_set_host_enabled_when_manually_disabled(self):
+        self.mox.UnsetStubs()
+        service_mock = mock.MagicMock()
+
+        # Previous status of the service: disabled: True, 'Manually disabled'
+        service_mock.configure_mock(disabled_reason='Manually disabled',
+                                    disabled=True)
+        with mock.patch.object(objects.Service, "get_by_compute_host",
+                               return_value=service_mock):
+            # An operator-initiated disable must survive auto re-enable.
+            self.connection._set_host_enabled(True)
+            self.assertTrue(service_mock.disabled)
+            self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+    def test_set_host_enabled_dont_override_manually_disabled(self):
+        self.mox.UnsetStubs()
+        service_mock = mock.MagicMock()
+
+        # Previous status of the service: disabled: True, 'Manually disabled'
+        service_mock.configure_mock(disabled_reason='Manually disabled',
+                                    disabled=True)
+        with mock.patch.object(objects.Service, "get_by_compute_host",
+                               return_value=service_mock):
+            # Auto-disable must not clobber an operator disable either.
+            self.connection._set_host_enabled(False, 'ERROR!')
+            self.assertTrue(service_mock.disabled)
+            self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+    @catch_notimplementederror
+    @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+    def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.cleanup(self.ctxt, instance_ref, network_info,
+                                destroy_vifs=False)
+        # destroy_vifs=False must skip VIF unplugging entirely.
+        self.assertEqual(unplug_vifs_mock.call_count, 0)
+
+    @catch_notimplementederror
+    @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+    def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
+        instance_ref, network_info = self._get_running_instance()
+        self.connection.cleanup(self.ctxt, instance_ref, network_info,
+                                destroy_vifs=True)
+        self.assertEqual(unplug_vifs_mock.call_count, 1)
+        unplug_vifs_mock.assert_called_once_with(instance_ref,
+                                                 network_info, True)
diff --git a/nova/tests/unit/virt/test_volumeutils.py b/nova/tests/unit/virt/test_volumeutils.py
new file mode 100644
index 0000000000..8ba7e50399
--- /dev/null
+++ b/nova/tests/unit/virt/test_volumeutils.py
@@ -0,0 +1,47 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2012 University Of Minho
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for virt volumeutils.
+"""
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova.virt import volumeutils
+
+
+class VolumeUtilsTestCase(test.TestCase):
+    # Tests for nova.virt.volumeutils.get_iscsi_initiator.
+    def test_get_iscsi_initiator(self):
+        # The initiator IQN is parsed out of /etc/iscsi/initiatorname.iscsi;
+        # surrounding junk lines must be ignored.
+        self.mox.StubOutWithMock(utils, 'execute')
+        initiator = 'fake.initiator.iqn'
+        rval = ("junk\nInitiatorName=%s\njunk\n" % initiator, None)
+        utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
+                      run_as_root=True).AndReturn(rval)
+        # Start test
+        self.mox.ReplayAll()
+        result = volumeutils.get_iscsi_initiator()
+        self.assertEqual(initiator, result)
+
+    def test_get_missing_iscsi_initiator(self):
+        # A missing config file maps to None rather than an exception.
+        self.mox.StubOutWithMock(utils, 'execute')
+        file_path = '/etc/iscsi/initiatorname.iscsi'
+        utils.execute('cat', file_path, run_as_root=True).AndRaise(
+            exception.FileNotFound(file_path=file_path)
+        )
+        # Start test
+        self.mox.ReplayAll()
+        result = volumeutils.get_iscsi_initiator()
+        self.assertIsNone(result)
diff --git a/nova/tests/unit/virt/vmwareapi/__init__.py b/nova/tests/unit/virt/vmwareapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/__init__.py
diff --git a/nova/tests/unit/virt/vmwareapi/fake.py b/nova/tests/unit/virt/vmwareapi/fake.py
new file mode 100644
index 0000000000..5bd2b7fb4f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/fake.py
@@ -0,0 +1,1606 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A fake VMware VI API implementation.
+"""
+
+import collections
+import pprint
+
+from oslo.serialization import jsonutils
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import ds_util
+
+# Object tables backing the fake VI "database"; 'files' is a flat list of
+# datastore file paths, every other entry maps obj ref -> managed object.
+_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
+            'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
+            'files', 'ClusterComputeResource', 'HostStorageSystem']
+
+# Size (bytes) reported for every fake datastore file.
+_FAKE_FILE_SIZE = 1024
+
+# Module-level mutable state shared by all fake sessions in a process;
+# reset()/cleanup() must be called between tests.
+_db_content = {}
+_array_types = {}
+_vim_map = {}
+
+LOG = logging.getLogger(__name__)
+
+
+def log_db_contents(msg=None):
+    """Log DB Contents."""
+    LOG.debug("%(text)s: _db_content => %(content)s",
+              {'text': msg or "", 'content': pprint.pformat(_db_content)})
+
+
+def reset():
+    """Resets the db contents."""
+    # Rebuild a standard two-host / two-datastore / two-cluster topology
+    # from scratch so every test starts from the same inventory.
+    cleanup()
+    create_network()
+    create_host_network_system()
+    create_host_storage_system()
+    ds_ref1 = create_datastore('ds1', 1024, 500)
+    create_host(ds_ref=ds_ref1)
+    ds_ref2 = create_datastore('ds2', 1024, 500)
+    create_host(ds_ref=ds_ref2)
+    create_datacenter('dc1', ds_ref1)
+    create_datacenter('dc2', ds_ref2)
+    create_res_pool()
+    create_cluster('test_cluster', ds_ref1)
+    create_cluster('test_cluster2', ds_ref2)
+
+
+def cleanup():
+    """Clear the db contents."""
+    for c in _CLASSES:
+        # We fake the datastore by keeping the file references as a list of
+        # names in the db
+        if c == 'files':
+            _db_content[c] = []
+        else:
+            _db_content[c] = {}
+
+
+def _create_object(table, table_obj):
+    """Create an object in the db."""
+    # Keyed by the object's ManagedObjectReference ('obj' attribute).
+    _db_content[table][table_obj.obj] = table_obj
+
+
+def _get_object(obj_ref):
+    """Get object for the give reference."""
+    # Raises KeyError if the reference is unknown (intentional: tests
+    # should never look up stale refs).
+    return _db_content[obj_ref.type][obj_ref]
+
+
+def _get_objects(obj_type):
+    """Get objects of the type."""
+    lst_objs = FakeRetrieveResult()
+    for key in _db_content[obj_type]:
+        lst_objs.add_object(_db_content[obj_type][key])
+    return lst_objs
+
+
+def _convert_to_array_of_mor(mors):
+    """Wraps the given array into a DataObject."""
+    # Mimics the SOAP ArrayOfManagedObjectReference wrapper shape.
+    array_of_mors = DataObject()
+    array_of_mors.ManagedObjectReference = mors
+    return array_of_mors
+
+
+def _convert_to_array_of_opt_val(optvals):
+    """Wraps the given array into a DataObject."""
+    # Mimics the SOAP ArrayOfOptionValue wrapper shape.
+    array_of_optv = DataObject()
+    array_of_optv.OptionValue = optvals
+    return array_of_optv
+
+
+def _create_array_of_type(t):
+    """Returns an array to contain objects of type t."""
+    # Array classes are generated once per element type and memoized in
+    # _array_types; each instance exposes an empty list attribute named
+    # after the element type (e.g. .VirtualDevice).
+    if t in _array_types:
+        return _array_types[t]()
+
+    array_type_name = 'ArrayOf%s' % t
+    array_type = type(array_type_name, (DataObject,), {})
+
+    def __init__(self):
+        super(array_type, self).__init__(array_type_name)
+        setattr(self, t, [])
+
+    setattr(array_type, '__init__', __init__)
+
+    _array_types[t] = array_type
+    return array_type()
+
+
+class FakeRetrieveResult(object):
+    """Object to retrieve a ObjectContent list."""
+
+    def __init__(self, token=None):
+        self.objects = []
+        # 'token' is only present when there are further pages to
+        # retrieve, matching the real RetrieveResult contract.
+        if token is not None:
+            self.token = token
+
+    def add_object(self, object):
+        # NOTE: parameter name shadows the builtin 'object'; kept for
+        # backward compatibility with keyword callers.
+        self.objects.append(object)
+
+
+class MissingProperty(object):
+    """Missing object in ObjectContent's missing set."""
+    def __init__(self, path='fake-path', message='fake_message',
+                 method_fault=None):
+        self.path = path
+        # Nested fault structure mirrors the SOAP MissingProperty shape.
+        self.fault = DataObject()
+        self.fault.localizedMessage = message
+        self.fault.fault = method_fault
+
+
+def _get_object_refs(obj_type):
+    """Get object References of the type."""
+    # Returns the ManagedObjectReference keys, not the objects themselves.
+    lst_objs = []
+    for key in _db_content[obj_type]:
+        lst_objs.append(key)
+    return lst_objs
+
+
+def _update_object(table, table_obj):
+    """Update objects of the type."""
+    # Same mechanics as _create_object: overwrite by reference key.
+    _db_content[table][table_obj.obj] = table_obj
+
+
+class Prop(object):
+    """Property Object base class."""
+    # A single (name, value) entry of a ManagedObject's propSet.
+
+    def __init__(self, name=None, val=None):
+        self.name = name
+        self.val = val
+
+
+class ManagedObjectReference(object):
+ """A managed object reference is a remote identifier."""
+
+ def __init__(self, name="ManagedObject", value=None):
+ super(ManagedObjectReference, self)
+ # Managed Object Reference value attributes
+ # typically have values like vm-123 or
+ # host-232 and not UUID.
+ self.value = value
+ # Managed Object Reference type
+ # attributes hold the name of the type
+ # of the vCenter object the value
+ # attribute is the identifier for
+ self.type = name
+ self._type = name
+
+
+class ObjectContent(object):
+    """ObjectContent array holds dynamic properties."""
+
+    # This class is a *fake* of a class sent back to us by
+    # SOAP. It has its own names. These names are decided
+    # for us by the API we are *faking* here.
+    def __init__(self, obj_ref, prop_list=None, missing_list=None):
+        self.obj = obj_ref
+
+        # Normalize non-iterable inputs (including None) to empty lists.
+        if not isinstance(prop_list, collections.Iterable):
+            prop_list = []
+
+        if not isinstance(missing_list, collections.Iterable):
+            missing_list = []
+
+        # propSet is the name your Python code will need to
+        # use since this is the name that the API will use
+        if prop_list:
+            self.propSet = prop_list
+
+        # missingSet is the name your python code will
+        # need to use since this is the name that the
+        # API we are talking to will use.
+        if missing_list:
+            self.missingSet = missing_list
+
+
+class ManagedObject(object):
+    """Managed Object base class."""
+    # Class-level counter used to mint unique moids per subclass.
+    _counter = 0
+
+    def __init__(self, mo_id_prefix="obj"):
+        """Sets the obj property which acts as a reference to the object."""
+        # object.__setattr__ bypasses our overridden __setattr__ so these
+        # bootstrap attributes are real attributes, not propSet entries.
+        object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
+        object.__setattr__(self, 'propSet', [])
+        object.__setattr__(self, 'obj',
+                           ManagedObjectReference(self.__class__.__name__,
+                                                  self.mo_id))
+
+    def set(self, attr, val):
+        """Sets an attribute value. Not using the __setattr__ directly for we
+        want to set attributes of the type 'a.b.c' and using this function
+        class we set the same.
+        """
+        self.__setattr__(attr, val)
+
+    def get(self, attr):
+        """Gets an attribute. Used as an intermediary to get nested
+        property like 'a.b.c' value.
+        """
+        return self.__getattr__(attr)
+
+    def delete(self, attr):
+        """Deletes an attribute."""
+        # NOTE(review): py2 filter() returns a list; under py3 this would
+        # leave an iterator in propSet — confirm before porting.
+        self.propSet = filter(lambda elem: elem.name != attr, self.propSet)
+
+    def __setattr__(self, attr, val):
+        # TODO(hartsocks): this is adds unnecessary complexity to the class
+        # Update in place if the property already exists ...
+        for prop in self.propSet:
+            if prop.name == attr:
+                prop.val = val
+                return
+        # ... otherwise append a new Prop entry.
+        elem = Prop()
+        elem.name = attr
+        elem.val = val
+        self.propSet.append(elem)
+
+    def __getattr__(self, attr):
+        # TODO(hartsocks): remove this
+        # in a real ManagedObject you have to iterate the propSet
+        # in a real ManagedObject, the propSet is a *set* not a list
+        for elem in self.propSet:
+            if elem.name == attr:
+                return elem.val
+        msg = _("Property %(attr)s not set for the managed object %(name)s")
+        raise exception.NovaException(msg % {'attr': attr,
+                                             'name': self.__class__.__name__})
+
+    def _generate_moid(self, prefix):
+        """Generates a new Managed Object ID."""
+        # e.g. "vm-1", "host-2"; counter is per concrete subclass.
+        self.__class__._counter += 1
+        return prefix + "-" + str(self.__class__._counter)
+
+    def __repr__(self):
+        return jsonutils.dumps(dict([(elem.name, elem.val)
+                                     for elem in self.propSet]))
+
+
+class DataObject(object):
+    """Data object base class."""
+    # Plain attribute bag mirroring SOAP data objects; attributes are
+    # assigned freely by callers.
+
+    def __init__(self, obj_name=None):
+        self.obj_name = obj_name
+
+    def __repr__(self):
+        return str(self.__dict__)
+
+
+class HostInternetScsiHba(DataObject):
+    """iSCSI Host Bus Adapter."""
+
+    def __init__(self):
+        super(HostInternetScsiHba, self).__init__()
+        self.device = 'vmhba33'
+        self.key = 'key-vmhba33'
+
+
+# The fault classes below set __name__ to the oslo.vmware fault string so
+# fault-dispatch code in the fake session resolves them like real faults.
+class FileAlreadyExists(DataObject):
+    """File already exists class."""
+
+    def __init__(self):
+        super(FileAlreadyExists, self).__init__()
+        self.__name__ = vexc.FILE_ALREADY_EXISTS
+
+
+class FileNotFound(DataObject):
+    """File not found class."""
+
+    def __init__(self):
+        super(FileNotFound, self).__init__()
+        self.__name__ = vexc.FILE_NOT_FOUND
+
+
+class FileFault(DataObject):
+    """File fault."""
+
+    def __init__(self):
+        super(FileFault, self).__init__()
+        self.__name__ = vexc.FILE_FAULT
+
+
+class CannotDeleteFile(DataObject):
+    """Cannot delete file."""
+
+    def __init__(self):
+        super(CannotDeleteFile, self).__init__()
+        self.__name__ = vexc.CANNOT_DELETE_FILE
+
+
+class FileLocked(DataObject):
+    """File locked."""
+
+    def __init__(self):
+        super(FileLocked, self).__init__()
+        self.__name__ = vexc.FILE_LOCKED
+
+
+class VirtualDisk(DataObject):
+    """Virtual Disk class."""
+
+    def __init__(self, controllerKey=0, unitNumber=0):
+        super(VirtualDisk, self).__init__()
+        # key identifies the device within a VM's hardware list.
+        self.key = 0
+        self.controllerKey = controllerKey
+        self.unitNumber = unitNumber
+
+
+class VirtualDiskFlatVer2BackingInfo(DataObject):
+    """VirtualDiskFlatVer2BackingInfo class."""
+
+    def __init__(self):
+        super(VirtualDiskFlatVer2BackingInfo, self).__init__()
+        # Defaults model a thick, non-eager-zeroed disk.
+        self.thinProvisioned = False
+        self.eagerlyScrub = False
+
+
+class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
+    """VirtualDiskRawDiskMappingVer1BackingInfo class."""
+
+    def __init__(self):
+        super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
+        self.lunUuid = ""
+
+
+class VirtualIDEController(DataObject):
+
+    def __init__(self, key=0):
+        self.key = key
+
+
+class VirtualLsiLogicController(DataObject):
+    """VirtualLsiLogicController class."""
+    def __init__(self, key=0, scsiCtlrUnitNumber=0):
+        self.key = key
+        self.scsiCtlrUnitNumber = scsiCtlrUnitNumber
+
+
+class VirtualLsiLogicSASController(DataObject):
+    """VirtualLsiLogicSASController class."""
+    # Marker type only; no extra state beyond DataObject.
+    pass
+
+
+class VirtualPCNet32(DataObject):
+    """VirtualPCNet32 class."""
+
+    def __init__(self):
+        super(VirtualPCNet32, self).__init__()
+        # 4000 is the conventional device key range for NICs.
+        self.key = 4000
+
+
+class OptionValue(DataObject):
+    """OptionValue class."""
+
+    def __init__(self, key=None, value=None):
+        super(OptionValue, self).__init__()
+        self.key = key
+        self.value = value
+
+
+class VirtualMachine(ManagedObject):
+    """Virtual Machine class."""
+    # Fake VM managed object: all state lives in the inherited propSet,
+    # keyed by dotted vSphere property paths.
+
+    def __init__(self, **kwargs):
+        super(VirtualMachine, self).__init__("vm")
+        self.set("name", kwargs.get("name", 'test-vm'))
+        self.set("runtime.connectionState",
+                 kwargs.get("conn_state", "connected"))
+        self.set("summary.config.guestId",
+                 kwargs.get("guest", constants.DEFAULT_OS_TYPE))
+        ds_do = kwargs.get("ds", None)
+        self.set("datastore", _convert_to_array_of_mor(ds_do))
+        self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
+                                                         "toolsOk"))
+        self.set("summary.guest.toolsRunningStatus", kwargs.get(
+            "toolsrunningstate", "guestToolsRunning"))
+        self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
+        self.set("config.files.vmPathName", kwargs.get("vmPathName"))
+        self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
+        self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
+        self.set("summary.config.instanceUuid", kwargs.get("instanceUuid"))
+
+        devices = _create_array_of_type('VirtualDevice')
+        devices.VirtualDevice = kwargs.get("virtual_device", [])
+        self.set("config.hardware.device", devices)
+
+        exconfig_do = kwargs.get("extra_config", None)
+        self.set("config.extraConfig",
+                 _convert_to_array_of_opt_val(exconfig_do))
+        if exconfig_do:
+            # Also expose each option under its indexed property path, the
+            # way the real API allows per-key retrieval.
+            for optval in exconfig_do:
+                self.set('config.extraConfig["%s"]' % optval.key, optval)
+        self.set('runtime.host', kwargs.get("runtime_host", None))
+        # Plain attribute (not a propSet entry would be shadowed anyway);
+        # used by reconfig() when re-attaching disks.
+        self.device = kwargs.get("virtual_device", [])
+        # Sample of diagnostics data is below.
+        config = [
+            ('template', False),
+            ('vmPathName', 'fake_path'),
+            ('memorySizeMB', 512),
+            ('cpuReservation', 0),
+            ('memoryReservation', 0),
+            ('numCpu', 1),
+            ('numEthernetCards', 1),
+            ('numVirtualDisks', 1)]
+        self.set("summary.config", config)
+
+        quickStats = [
+            ('overallCpuUsage', 0),
+            ('overallCpuDemand', 0),
+            ('guestMemoryUsage', 0),
+            ('hostMemoryUsage', 141),
+            ('balloonedMemory', 0),
+            ('consumedOverheadMemory', 20)]
+        self.set("summary.quickStats", quickStats)
+
+        key1 = {'key': 'cpuid.AES'}
+        key2 = {'key': 'cpuid.AVX'}
+        runtime = [
+            ('connectionState', 'connected'),
+            ('powerState', 'poweredOn'),
+            ('toolsInstallerMounted', False),
+            ('suspendInterval', 0),
+            ('memoryOverhead', 21417984),
+            ('maxCpuUsage', 2000),
+            ('featureRequirement', [key1, key2])]
+        self.set("summary.runtime", runtime)
+
+    def _update_extra_config(self, extra):
+        # Upsert a single OptionValue into config.extraConfig.
+        extra_config = self.get("config.extraConfig")
+        values = extra_config.OptionValue
+        for value in values:
+            if value.key == extra.key:
+                value.value = extra.value
+                return
+        kv = DataObject()
+        kv.key = extra.key
+        kv.value = extra.value
+        extra_config.OptionValue.append(kv)
+        self.set("config.extraConfig", extra_config)
+        extra_config = self.get("config.extraConfig")
+
+    def reconfig(self, factory, val):
+        """Called to reconfigure the VM. Actually customizes the property
+        setting of the Virtual Machine object.
+        """
+
+        if hasattr(val, 'name') and val.name:
+            self.set("name", val.name)
+
+        if hasattr(val, 'extraConfig'):
+            extraConfigs = _merge_extraconfig(
+                self.get("config.extraConfig").OptionValue,
+                val.extraConfig)
+            self.get("config.extraConfig").OptionValue = extraConfigs
+
+        if hasattr(val, 'instanceUuid') and val.instanceUuid is not None:
+            if val.instanceUuid == "":
+                # Empty string requests a freshly generated UUID.
+                val.instanceUuid = uuidutils.generate_uuid()
+            self.set("summary.config.instanceUuid", val.instanceUuid)
+
+        try:
+            if not hasattr(val, 'deviceChange'):
+                return
+
+            if hasattr(val, 'extraConfig'):
+                # there are 2 cases - new entry or update an existing one
+                for extra in val.extraConfig:
+                    self._update_extra_config(extra)
+
+            if len(val.deviceChange) < 2:
+                return
+
+            # Case of Reconfig of VM to attach disk
+            controller_key = val.deviceChange[0].device.controllerKey
+            filename = val.deviceChange[0].device.backing.fileName
+
+            disk = VirtualDisk()
+            disk.controllerKey = controller_key
+
+            disk_backing = VirtualDiskFlatVer2BackingInfo()
+            disk_backing.fileName = filename
+            disk_backing.key = -101
+            disk.backing = disk_backing
+
+            controller = VirtualLsiLogicController()
+            controller.key = controller_key
+
+            devices = _create_array_of_type('VirtualDevice')
+            devices.VirtualDevice = [disk, controller, self.device[0]]
+            self.set("config.hardware.device", devices)
+        except AttributeError:
+            # Malformed deviceChange specs are deliberately ignored:
+            # this fake is best-effort, matching lenient server behavior.
+            pass
+
+
+class Network(ManagedObject):
+    """Network class."""
+
+    def __init__(self):
+        super(Network, self).__init__("network")
+        self.set("summary.name", "vmnet0")
+
+
+class ResourcePool(ManagedObject):
+    """Resource Pool class."""
+    # Builds a nested summary/runtime/config structure with fixed CPU and
+    # memory capacity figures used by resource-reporting tests.
+
+    def __init__(self, name="test_ResPool", value="resgroup-test"):
+        super(ResourcePool, self).__init__("rp")
+        self.set("name", name)
+        summary = DataObject()
+        runtime = DataObject()
+        config = DataObject()
+        memory = DataObject()
+        cpu = DataObject()
+
+        memoryAllocation = DataObject()
+        cpuAllocation = DataObject()
+        vm_list = DataObject()
+
+        # Capacity/usage figures: 1000 MiB max / 500 MiB used memory,
+        # 10000 MHz max / 1000 MHz used CPU.
+        memory.maxUsage = 1000 * units.Mi
+        memory.overallUsage = 500 * units.Mi
+        cpu.maxUsage = 10000
+        cpu.overallUsage = 1000
+        runtime.cpu = cpu
+        runtime.memory = memory
+        summary.runtime = runtime
+        cpuAllocation.limit = 10000
+        memoryAllocation.limit = 1024
+        memoryAllocation.reservation = 1024
+        config.memoryAllocation = memoryAllocation
+        config.cpuAllocation = cpuAllocation
+        vm_list.ManagedObjectReference = []
+        self.set("summary", summary)
+        self.set("summary.runtime.memory", memory)
+        self.set("config", config)
+        self.set("vm", vm_list)
+        # Parent and owner both point at the supplied resgroup reference.
+        parent = ManagedObjectReference(value=value,
+                                        name=name)
+        owner = ManagedObjectReference(value=value,
+                                       name=name)
+        self.set("parent", parent)
+        self.set("owner", owner)
+
+
+class DatastoreHostMount(DataObject):
+    def __init__(self, value='host-100'):
+        super(DatastoreHostMount, self).__init__()
+        # Grab an arbitrary existing HostSystem ref from the fake db.
+        # NOTE(review): dict.keys()[0] is Python 2 only — keys() is a view
+        # on py3; confirm before porting.
+        host_ref = (_db_content["HostSystem"]
+                    [_db_content["HostSystem"].keys()[0]].obj)
+        host_system = DataObject()
+        host_system.ManagedObjectReference = [host_ref]
+        host_system.value = value
+        self.key = host_system
+
+
+class ClusterComputeResource(ManagedObject):
+    """Cluster class."""
+
+    def __init__(self, name="test_cluster"):
+        super(ClusterComputeResource, self).__init__("domain")
+        self.set("name", name)
+        # host/datastore/resourcePool start empty and are populated via
+        # the _add_* helpers below.
+        self.set("host", None)
+        self.set("datastore", None)
+        self.set("resourcePool", None)
+
+        # Aggregate stats start at zero except effectiveCpu.
+        summary = DataObject()
+        summary.numHosts = 0
+        summary.numCpuCores = 0
+        summary.numCpuThreads = 0
+        summary.numEffectiveHosts = 0
+        summary.totalMemory = 0
+        summary.effectiveMemory = 0
+        summary.effectiveCpu = 10000
+        self.set("summary", summary)
+
+    def _add_root_resource_pool(self, r_pool):
+        if r_pool:
+            self.set("resourcePool", r_pool)
+
+    def _add_host(self, host_sys):
+        if host_sys:
+            hosts = self.get("host")
+            # Lazily create the MOR array wrapper on first host.
+            if hosts is None:
+                hosts = DataObject()
+                hosts.ManagedObjectReference = []
+                self.set("host", hosts)
+            hosts.ManagedObjectReference.append(host_sys)
+            # Update summary every time a new host is added
+            self._update_summary()
+
+    def _add_datastore(self, datastore):
+        if datastore:
+            datastores = self.get("datastore")
+            if datastores is None:
+                datastores = DataObject()
+                datastores.ManagedObjectReference = []
+                self.set("datastore", datastores)
+            datastores.ManagedObjectReference.append(datastore)
+
+    # Method to update summary of a cluster upon host addition
+    def _update_summary(self):
+        summary = self.get("summary")
+        # Recompute from scratch on every call.
+        summary.numHosts = 0
+        summary.numCpuCores = 0
+        summary.numCpuThreads = 0
+        summary.numEffectiveHosts = 0
+        summary.totalMemory = 0
+        summary.effectiveMemory = 0
+
+        hosts = self.get("host")
+        # Compute the aggregate stats
+        summary.numHosts = len(hosts.ManagedObjectReference)
+        for host_ref in hosts.ManagedObjectReference:
+            host_sys = _get_object(host_ref)
+            connected = host_sys.get("connected")
+            host_summary = host_sys.get("summary")
+            summary.numCpuCores += host_summary.hardware.numCpuCores
+            summary.numCpuThreads += host_summary.hardware.numCpuThreads
+            summary.totalMemory += host_summary.hardware.memorySize
+            # Effective memory (MiB) = physical minus current usage; only
+            # connected hosts count toward effective totals.
+            free_memory = (host_summary.hardware.memorySize / units.Mi
+                           - host_summary.quickStats.overallMemoryUsage)
+            summary.effectiveMemory += free_memory if connected else 0
+            summary.numEffectiveHosts += 1 if connected else 0
+        self.set("summary", summary)
+
+
class Datastore(ManagedObject):
    """Fake VMFS Datastore.

    :param capacity: total size in GiB (stored as bytes).
    :param free: free space in GiB (stored as bytes).
    """

    def __init__(self, name="fake-ds", capacity=1024, free=500,
                 accessible=True, maintenance_mode="normal"):
        super(Datastore, self).__init__("ds")
        self.set("summary.type", "VMFS")
        self.set("summary.name", name)
        self.set("summary.capacity", capacity * units.Gi)
        self.set("summary.freeSpace", free * units.Gi)
        self.set("summary.accessible", accessible)
        self.set("summary.maintenanceMode", maintenance_mode)
        self.set("browser", "")
+
+
class HostNetworkSystem(ManagedObject):
    """Fake HostNetworkSystem exposing a single physical NIC "vmnic0"."""

    def __init__(self, name="networkSystem"):
        super(HostNetworkSystem, self).__init__("ns")
        self.set("name", name)

        pnic_do = DataObject()
        pnic_do.device = "vmnic0"

        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]

        self.set("networkInfo.pnic", net_info_pnic)
+
+
class HostStorageSystem(ManagedObject):
    """Fake HostStorageSystem; HostSystem.__init__ fills in its HBA info."""

    def __init__(self):
        super(HostStorageSystem, self).__init__("storageSystem")
+
+
class HostSystem(ManagedObject):
    """Fake HostSystem managed object.

    Wires up the shared fake network/storage subsystems, a default
    per-host datastore, and static hardware/runtime/network summaries so
    host-side queries return data.
    """

    def __init__(self, name="ha-host", connected=True, ds_ref=None,
                 maintenance_mode=False):
        super(HostSystem, self).__init__("host")
        self.set("name", name)
        # Lazily create the shared singleton helper objects in the fake db.
        if _db_content.get("HostNetworkSystem", None) is None:
            create_host_network_system()
        if not _get_object_refs('HostStorageSystem'):
            create_host_storage_system()
        # list() so the first-key lookup also works on Python 3, where
        # dict.keys() returns a non-indexable view (was .keys()[0]).
        host_net_key = list(_db_content["HostNetworkSystem"].keys())[0]
        host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
        self.set("configManager.networkSystem", host_net_sys)
        host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
        self.set("configManager.storageSystem", host_storage_sys_key)

        if not ds_ref:
            # Default local datastore: 500 GiB capacity, 500 GiB free.
            ds_ref = create_datastore('local-host-%s' % name, 500, 500)
        datastores = DataObject()
        datastores.ManagedObjectReference = [ds_ref]
        self.set("datastore", datastores)

        # Static hardware description (8 cores / 16 threads / 1 GiB RAM).
        summary = DataObject()
        hardware = DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuPkgs = 2
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"
        hardware.uuid = "host-uuid"
        hardware.memorySize = units.Gi
        summary.hardware = hardware

        runtime = DataObject()
        if connected:
            runtime.connectionState = "connected"
        else:
            runtime.connectionState = "disconnected"

        runtime.inMaintenanceMode = maintenance_mode

        summary.runtime = runtime

        quickstats = DataObject()
        quickstats.overallMemoryUsage = 500
        summary.quickStats = quickstats

        product = DataObject()
        product.name = "VMware ESXi"
        product.version = "5.0.0"
        config = DataObject()
        config.product = product
        summary.config = config

        pnic_do = DataObject()
        pnic_do.device = "vmnic0"
        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]

        self.set("summary", summary)
        self.set("capability.maxHostSupportedVcpus", 600)
        self.set("summary.hardware", hardware)
        self.set("summary.runtime", runtime)
        self.set("config.network.pnic", net_info_pnic)
        self.set("connected", connected)

        # Attach the (single, shared) fake Network object.
        if _db_content.get("Network", None) is None:
            create_network()
        net_ref = list(_db_content["Network"].values())[0].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)

        # Default virtual switch backed by vmnic0 with one port group.
        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = "vSwitch0"
        vswitch_do.portgroup = ["PortGroup-vmnet0"]

        net_switch = DataObject()  # renamed from misspelled "net_swicth"
        net_switch.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_switch)

        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-vmnet0"

        pg_spec = DataObject()
        pg_spec.vlanId = 0
        pg_spec.name = "vmnet0"

        host_pg_do.spec = pg_spec

        host_pg = DataObject()
        host_pg.HostPortGroup = [host_pg_do]
        self.set("config.network.portgroup", host_pg)

        # One iSCSI host bus adapter.
        config = DataObject()
        storageDevice = DataObject()

        iscsi_hba = HostInternetScsiHba()
        iscsi_hba.iScsiName = "iscsi-name"
        host_bus_adapter_array = DataObject()
        host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
        storageDevice.hostBusAdapter = host_bus_adapter_array
        config.storageDevice = storageDevice
        self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)

        # Mirror the HBA list on the storage system managed object so both
        # lookup paths see the same adapters.
        host_storage_sys = _get_object(host_storage_sys_key)
        host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
                             host_bus_adapter_array)

    def _add_iscsi_target(self, data):
        """Registers a fake iSCSI target/LUN topology on this host.

        :param data: dict with 'target_portal' and 'target_iqn' keys.
        """
        default_lun = DataObject()
        default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
        default_lun.key = 'key-vim.host.ScsiDisk-010'
        default_lun.deviceName = 'fake-device'
        default_lun.uuid = 'fake-uuid'
        scsi_lun_array = DataObject()
        scsi_lun_array.ScsiLun = [default_lun]
        self.set("config.storageDevice.scsiLun", scsi_lun_array)

        transport = DataObject()
        transport.address = [data['target_portal']]
        transport.iScsiName = data['target_iqn']
        default_target = DataObject()
        default_target.lun = [default_lun]
        default_target.transport = transport

        iscsi_adapter = DataObject()
        iscsi_adapter.adapter = 'key-vmhba33'
        iscsi_adapter.transport = transport
        iscsi_adapter.target = [default_target]
        iscsi_topology = DataObject()
        iscsi_topology.adapter = [iscsi_adapter]
        self.set("config.storageDevice.scsiTopology", iscsi_topology)

    def _add_port_group(self, spec):
        """Adds a port group to the host system object in the db."""
        pg_name = spec.name
        vswitch_name = spec.vswitchName
        vlanid = spec.vlanId

        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = vswitch_name
        vswitch_do.portgroup = ["PortGroup-%s" % pg_name]

        vswitches = self.get("config.network.vswitch").HostVirtualSwitch
        vswitches.append(vswitch_do)

        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-%s" % pg_name

        pg_spec = DataObject()
        pg_spec.vlanId = vlanid
        pg_spec.name = pg_name

        host_pg_do.spec = pg_spec
        host_pgrps = self.get("config.network.portgroup").HostPortGroup
        host_pgrps.append(host_pg_do)
+
+
class Datacenter(ManagedObject):
    """Fake Datacenter with a vmFolder, the shared network and a datastore.

    :param ds_ref: optional datastore ref; when None, ``datastore`` is set
        to None rather than an empty holder.
    """

    def __init__(self, name="ha-datacenter", ds_ref=None):
        super(Datacenter, self).__init__("dc")
        self.set("name", name)
        self.set("vmFolder", "vm_folder_ref")
        if _db_content.get("Network", None) is None:
            create_network()
        # list() so the first-entry lookup also works on Python 3,
        # where dict views are not indexable (was .keys()[0]).
        net_ref = list(_db_content["Network"].values())[0].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)
        if ds_ref:
            datastore = DataObject()
            datastore.ManagedObjectReference = [ds_ref]
        else:
            datastore = None
        self.set("datastore", datastore)
+
+
class Task(ManagedObject):
    """Fake Task managed object.

    When state == 'error' an ``info.error`` fault is attached; otherwise
    only name/state/result are populated.
    """

    def __init__(self, task_name, state="running", result=None,
                 error_fault=None):
        super(Task, self).__init__("Task")
        info = DataObject()
        info.name = task_name
        info.state = state
        if state == 'error':
            error = DataObject()
            error.localizedMessage = "Error message"
            # Use the caller-supplied fault when given, else a bare stub.
            if not error_fault:
                error.fault = DataObject()
            else:
                error.fault = error_fault
            info.error = error
        info.result = result
        self.set("info", info)
+
+
def create_host_network_system():
    """Creates and registers the fake HostNetworkSystem in the db."""
    host_net_system = HostNetworkSystem()
    _create_object("HostNetworkSystem", host_net_system)
+
+
def create_host_storage_system():
    """Creates and registers the fake HostStorageSystem in the db."""
    host_storage_system = HostStorageSystem()
    _create_object("HostStorageSystem", host_storage_system)
+
+
def create_host(ds_ref=None):
    """Creates and registers a fake HostSystem in the db."""
    host_system = HostSystem(ds_ref=ds_ref)
    _create_object('HostSystem', host_system)
+
+
def create_datacenter(name, ds_ref=None):
    """Creates and registers a fake Datacenter in the db."""
    data_center = Datacenter(name, ds_ref)
    _create_object('Datacenter', data_center)
+
+
def create_datastore(name, capacity, free):
    """Creates a fake Datastore (sizes in GiB) and returns its moref."""
    data_store = Datastore(name, capacity, free)
    _create_object('Datastore', data_store)
    return data_store.obj
+
+
def create_res_pool():
    """Creates a default fake ResourcePool and returns its moref."""
    res_pool = ResourcePool()
    _create_object('ResourcePool', res_pool)
    return res_pool.obj
+
+
def create_network():
    """Creates and registers the fake Network in the db."""
    network = Network()
    _create_object('Network', network)
+
+
def create_cluster(name, ds_ref):
    """Creates a fake cluster wired to the first two registered hosts.

    Requires at least two HostSystem entries to already exist in the db.
    """
    cluster = ClusterComputeResource(name=name)
    cluster._add_host(_get_object_refs("HostSystem")[0])
    cluster._add_host(_get_object_refs("HostSystem")[1])
    cluster._add_datastore(ds_ref)
    cluster._add_root_resource_pool(create_res_pool())
    _create_object('ClusterComputeResource', cluster)
+
+
def create_vm(uuid=None, name=None,
              cpus=1, memory=128, devices=None,
              vmPathName=None, extraConfig=None,
              res_pool_ref=None, host_ref=None):
    """Creates a fake VirtualMachine, registers it in the db and appends
    its ref to the owning resource pool's vm list.

    Returns the new VM's moref.
    """
    if uuid is None:
        uuid = uuidutils.generate_uuid()

    if name is None:
        name = uuid

    if devices is None:
        devices = []

    # list() wrappers so the "first entry" lookups also work on Python 3,
    # where dict views are not indexable (was .values()[0] / .keys()[0]).
    if vmPathName is None:
        vm_path = ds_util.DatastorePath(
            list(_db_content['Datastore'].values())[0])
    else:
        vm_path = ds_util.DatastorePath.parse(vmPathName)

    if res_pool_ref is None:
        res_pool_ref = list(_db_content['ResourcePool'].keys())[0]

    if host_ref is None:
        host_ref = list(_db_content["HostSystem"].keys())[0]

    # Fill in the default path to the vmx file if we were only given a
    # datastore. Note that if you create a VM with vmPathName '[foo]', when you
    # retrieve vmPathName it will be '[foo] uuid/uuid.vmx'. Hence we use
    # vm_path below for the stored value of vmPathName.
    if vm_path.rel_path == '':
        vm_path = vm_path.join(name, name + '.vmx')

    # Find (or lazily create) the datastore the VM path refers to.
    # items() works on both Python 2 and 3 (was iteritems()).
    for key, value in _db_content["Datastore"].items():
        if value.get('summary.name') == vm_path.datastore:
            ds = key
            break
    else:
        ds = create_datastore(vm_path.datastore, 1024, 500)

    vm_dict = {"name": name,
               "ds": [ds],
               "runtime_host": host_ref,
               "powerstate": "poweredOff",
               "vmPathName": str(vm_path),
               "numCpu": cpus,
               "mem": memory,
               "extra_config": extraConfig,
               "virtual_device": devices,
               "instanceUuid": uuid}
    vm = VirtualMachine(**vm_dict)
    _create_object("VirtualMachine", vm)

    res_pool = _get_object(res_pool_ref)
    res_pool.vm.ManagedObjectReference.append(vm.obj)

    return vm.obj
+
+
def create_task(task_name, state="running", result=None, error_fault=None):
    """Creates a fake Task, registers it and returns the ManagedObject."""
    task = Task(task_name, state, result, error_fault)
    _create_object("Task", task)
    return task
+
+
def _add_file(file_path):
    """Adds a file reference to the db (no deduplication is done)."""
    _db_content["files"].append(file_path)
+
+
def _remove_file(file_path):
    """Removes a file reference from the db.

    A path containing ".vmdk" is treated as a single file and must exist;
    any other path is treated as a folder and every entry containing it
    (the folder itself included) is removed.
    """
    files = _db_content.get("files")
    if ".vmdk" in file_path:
        # Single-file removal: missing files are an error.
        if file_path not in files:
            raise vexc.FileNotFoundException(file_path)
        files.remove(file_path)
    else:
        # Folder removal: collect matches first, then delete, so we never
        # mutate the list while scanning it.
        doomed = {entry for entry in files if file_path in entry}
        for entry in doomed:
            files.remove(entry)
+
+
def fake_plug_vifs(*args, **kwargs):
    """Fakes plugging vifs; accepts anything and does nothing."""
    pass
+
+
def fake_get_network(*args, **kwargs):
    """Fake get network; always returns a single 'fake'-typed network."""
    return {'type': 'fake'}
+
+
def get_file(file_path):
    """Returns True if the exact file path is registered in the db."""
    return file_path in _db_content.get("files")
+
+
def fake_upload_image(context, image, instance, **kwargs):
    """Fakes the upload of an image; does nothing."""
    pass
+
+
def fake_fetch_image(context, instance, host, dc_name, ds_name, file_path,
                     cookies=None):
    """Fakes the fetch of an image by registering its datastore path."""
    ds_file_path = "[" + ds_name + "] " + file_path
    _add_file(ds_file_path)
+
+
def _get_vm_mdo(vm_ref):
    """Gets the Virtual Machine with the ref from the db.

    :raises: exception.NotFound when no VMs exist or the ref is unknown.
    """
    if _db_content.get("VirtualMachine", None) is None:
        raise exception.NotFound(_("There is no VM registered"))
    if vm_ref not in _db_content.get("VirtualMachine"):
        raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                   "there") % vm_ref)
    return _db_content.get("VirtualMachine")[vm_ref]
+
+
+def _merge_extraconfig(existing, changes):
+ """Imposes the changes in extraConfig over the existing extraConfig."""
+ existing = existing or []
+ if (changes):
+ for c in changes:
+ if len([x for x in existing if x.key == c.key]) > 0:
+ extraConf = [x for x in existing if x.key == c.key][0]
+ extraConf.value = c.value
+ else:
+ existing.append(c)
+ return existing
+
+
class FakeFactory(object):
    """Fake factory class for the suds client."""

    def create(self, obj_name):
        """Creates a namespace object; `obj_name` is stored, not parsed."""
        return DataObject(obj_name)
+
+
class FakeService(DataObject):
    """Fake suds service: no-op Logout, no registered extensions."""

    def Logout(self, session_manager):
        pass

    def FindExtension(self, extension_manager, key):
        return []
+
+
class FakeClient(DataObject):
    """Fake suds client exposing only a FakeService."""

    def __init__(self):
        """Creates a namespace object."""
        self.service = FakeService()
+
+
class FakeSession(object):
    """Fake Session Class.

    Subclasses must override _call_method/_wait_for_task.
    """

    def __init__(self):
        self.vim = FakeVim()

    def _call_method(self, module, method, *args, **kwargs):
        raise NotImplementedError()

    def _wait_for_task(self, task_ref):
        raise NotImplementedError()
+
+
class FakeObjectRetrievalSession(FakeSession):
    """A session for faking object retrieval tasks.

    _call_method() returns a given set of objects
    sequentially, regardless of the method called.
    """

    def __init__(self, *ret):
        super(FakeObjectRetrievalSession, self).__init__()
        self.ret = ret
        # Index of the next object to hand out.
        self.ind = 0

    def _call_method(self, module, method, *args, **kwargs):
        # return fake objects in a circular manner
        self.ind = (self.ind + 1) % len(self.ret)
        return self.ret[self.ind - 1]
+
+
def get_fake_vim_object(vmware_api_session):
    """Returns the FakeVim for a session, creating it on first use."""
    # Use the repr() builtin rather than calling __repr__ directly.
    key = repr(vmware_api_session)
    if key not in _vim_map:
        _vim_map[key] = FakeVim()
    return _vim_map[key]
+
+
class FakeVim(object):
    """Fake VIM Class.

    Emulates the suds VIM client: holds a fake service content, manages
    fake sessions, and dispatches vSphere API method names (via
    __getattr__) to handlers that operate on the module-level fake db.
    """

    def __init__(self, protocol="https", host="localhost", trace=None):
        """Initializes the suds client object, sets the service content
        contents and the cookies for the session.
        """
        self._session = None
        self.client = FakeClient()
        self.client.factory = FakeFactory()

        transport = DataObject()
        transport.cookiejar = "Fake-CookieJar"
        options = DataObject()
        options.transport = transport

        self.client.options = options

        service_content = self.client.factory.create('ns0:ServiceContent')
        service_content.propertyCollector = "PropCollector"
        service_content.virtualDiskManager = "VirtualDiskManager"
        service_content.fileManager = "FileManager"
        service_content.rootFolder = "RootFolder"
        service_content.sessionManager = "SessionManager"
        service_content.extensionManager = "ExtensionManager"
        service_content.searchIndex = "SearchIndex"

        about_info = DataObject()
        about_info.name = "VMware vCenter Server"
        about_info.version = "5.1.0"
        service_content.about = about_info

        self._service_content = service_content

    @property
    def service_content(self):
        return self._service_content

    def __repr__(self):
        return "Fake VIM Object"

    def __str__(self):
        return "Fake VIM Object"

    def _login(self):
        """Logs in and sets the session object in the db."""
        self._session = uuidutils.generate_uuid()
        session = DataObject()
        session.key = self._session
        session.userName = 'sessionUserName'
        _db_content['session'][self._session] = session
        return session

    def _terminate_session(self, *args, **kwargs):
        """Terminates a session."""
        s = kwargs.get("sessionId")[0]
        if s not in _db_content['session']:
            return
        del _db_content['session'][s]

    def _check_session(self):
        """Checks if the session is active."""
        if (self._session is None or self._session not in
                _db_content['session']):
            LOG.debug("Session is faulty")
            raise vexc.VimFaultException(
                [vexc.NOT_AUTHENTICATED],
                _("Session Invalid"))

    def _session_is_active(self, *args, **kwargs):
        try:
            self._check_session()
            return True
        except Exception:
            return False

    def _create_vm(self, method, *args, **kwargs):
        """Creates and registers a VM object with the Host System."""
        config_spec = kwargs.get("config")

        # Mimic vCenter rejecting unknown guest OS identifiers.
        if config_spec.guestId not in constants.VALID_OS_TYPES:
            ex = vexc.VMwareDriverException('A specified parameter was '
                                            'not correct.')
            return create_task(method, "error", error_fault=ex).obj

        pool = kwargs.get('pool')

        # Only 'add' operations contribute devices to the new VM.
        devices = []
        for device_change in config_spec.deviceChange:
            if device_change.operation == 'add':
                devices.append(device_change.device)

        vm_ref = create_vm(config_spec.instanceUuid, config_spec.name,
                           config_spec.numCPUs, config_spec.memoryMB,
                           devices, config_spec.files.vmPathName,
                           config_spec.extraConfig, pool)

        task_mdo = create_task(method, "success", result=vm_ref)
        return task_mdo.obj

    def _reconfig_vm(self, method, *args, **kwargs):
        """Reconfigures a VM and sets the properties supplied."""
        vm_ref = args[0]
        vm_mdo = _get_vm_mdo(vm_ref)
        vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _create_copy_disk(self, method, vmdk_file_path):
        """Creates/copies a vmdk file object in the datastore."""
        # We need to add/create both .vmdk and .-flat.vmdk files
        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _add_file(vmdk_file_path)
        _add_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _extend_disk(self, method, size):
        """Extend disk size when create a instance."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _snapshot_vm(self, method):
        """Snapshots a VM. Here we do nothing for faking sake."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _find_all_by_uuid(self, *args, **kwargs):
        """Returns refs of all VMs whose instanceUuid matches."""
        uuid = kwargs.get('uuid')
        vm_refs = []
        for vm_ref in _db_content.get("VirtualMachine"):
            vm = _get_object(vm_ref)
            vm_uuid = vm.get("summary.config.instanceUuid")
            if vm_uuid == uuid:
                vm_refs.append(vm_ref)
        return vm_refs

    def _delete_snapshot(self, method, *args, **kwargs):
        """Deletes a VM snapshot. Here we do nothing for faking sake."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _delete_file(self, method, *args, **kwargs):
        """Deletes a file from the datastore."""
        _remove_file(kwargs.get("name"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _just_return(self):
        """Fakes a return."""
        return

    def _just_return_task(self, method):
        """Fakes a task return."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _clone_vm(self, method, *args, **kwargs):
        """Fakes a VM clone: registers a copy of the source VM, applying
        any instanceUuid/extraConfig overrides from the clone spec.
        """
        source_vmref = args[0]
        source_vm_mdo = _get_vm_mdo(source_vmref)
        clone_spec = kwargs.get("spec")
        vm_dict = {
            "name": kwargs.get("name"),
            "ds": source_vm_mdo.get("datastore"),
            "runtime_host": source_vm_mdo.get("runtime.host"),
            "powerstate": source_vm_mdo.get("runtime.powerState"),
            "vmPathName": source_vm_mdo.get("config.files.vmPathName"),
            "numCpu": source_vm_mdo.get("summary.config.numCpu"),
            "mem": source_vm_mdo.get("summary.config.memorySizeMB"),
            "extra_config": source_vm_mdo.get("config.extraConfig").OptionValue,
            "virtual_device":
                source_vm_mdo.get("config.hardware.device").VirtualDevice,
            "instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")}

        if clone_spec.config is not None:
            # Impose the config changes specified in the config property
            if (hasattr(clone_spec.config, 'instanceUuid') and
                    clone_spec.config.instanceUuid is not None):
                vm_dict["instanceUuid"] = clone_spec.config.instanceUuid

            if hasattr(clone_spec.config, 'extraConfig'):
                extraConfigs = _merge_extraconfig(
                    vm_dict["extra_config"], clone_spec.config.extraConfig)
                vm_dict["extra_config"] = extraConfigs

        virtual_machine = VirtualMachine(**vm_dict)
        _create_object("VirtualMachine", virtual_machine)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _unregister_vm(self, method, *args, **kwargs):
        """Unregisters a VM from the Host System."""
        vm_ref = args[0]
        # Raises NotFound (via _get_vm_mdo) before deleting.
        _get_vm_mdo(vm_ref)
        del _db_content["VirtualMachine"][vm_ref]

    def _search_ds(self, method, *args, **kwargs):
        """Searches the datastore for a file."""
        # TODO(garyk): add support for spec parameter
        ds_path = kwargs.get("datastorePath")
        matched_files = set()
        # Check if we are searching for a file or a directory
        directory = False
        dname = '%s/' % ds_path
        for file in _db_content.get("files"):
            if file == dname:
                directory = True
                break
        # A directory search implies that we must return all
        # subdirectories
        if directory:
            for file in _db_content.get("files"):
                if file.find(ds_path) != -1:
                    if not file.endswith(ds_path):
                        # str.lstrip() strips a *character set*, not a
                        # prefix, which mangled entries whose first path
                        # component shared characters with dname; remove
                        # the leading "<dir>/" prefix explicitly instead.
                        if file.startswith(dname):
                            rel = file[len(dname):]
                        else:
                            rel = file
                        path = rel.split('/')
                        if path:
                            matched_files.add(path[0])
            if not matched_files:
                matched_files.add('/')
        else:
            for file in _db_content.get("files"):
                if file.find(ds_path) != -1:
                    matched_files.add(ds_path)
        if matched_files:
            result = DataObject()
            result.path = ds_path
            result.file = []
            for file in matched_files:
                matched = DataObject()
                matched.path = file
                result.file.append(matched)
            task_mdo = create_task(method, "success", result=result)
        else:
            task_mdo = create_task(method, "error", error_fault=FileNotFound())
        return task_mdo.obj

    def _move_file(self, method, *args, **kwargs):
        """Moves a file (or a folder and its children) in the datastore."""
        source = kwargs.get('sourceName')
        destination = kwargs.get('destinationName')
        new_files = []
        if source != destination:
            for file in _db_content.get("files"):
                if source in file:
                    new_file = file.replace(source, destination)
                    new_files.append(new_file)
            # if source is not a file then the children will also
            # be deleted
            _remove_file(source)
        for file in new_files:
            _add_file(file)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def fake_transfer_file(self, ds_name, file_path):
        """Fakes fetch image call.
        Just adds a reference to the db for the file.
        """
        ds_file_path = "[" + ds_name + "] " + file_path
        _add_file(ds_file_path)

    def _make_dir(self, method, *args, **kwargs):
        """Creates a directory in the datastore."""
        ds_path = kwargs.get("name")
        if get_file(ds_path):
            raise vexc.FileAlreadyExistsException()
        # Directories are stored with a trailing slash.
        _db_content["files"].append('%s/' % ds_path)

    def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
        """Sets power state for the VM."""
        if _db_content.get("VirtualMachine", None) is None:
            raise exception.NotFound(_("No Virtual Machine has been "
                                       "registered yet"))
        if vm_ref not in _db_content.get("VirtualMachine"):
            raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                       "there") % vm_ref)
        vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
        vm_mdo.set("runtime.powerState", pwr_state)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _retrieve_properties_continue(self, method, *args, **kwargs):
        """Continues the retrieve."""
        return FakeRetrieveResult()

    def _retrieve_properties_cancel(self, method, *args, **kwargs):
        """Cancels the retrieve."""
        return None

    def _retrieve_properties(self, method, *args, **kwargs):
        """Retrieves properties based on the type."""
        spec_set = kwargs.get("specSet")[0]
        spec_type = spec_set.propSet[0].type
        properties = spec_set.propSet[0].pathSet
        if not isinstance(properties, list):
            properties = properties.split()
        objs = spec_set.objectSet
        lst_ret_objs = FakeRetrieveResult()
        for obj in objs:
            try:
                obj_ref = obj.obj
                if obj_ref == "RootFolder":
                    # This means that we are retrieving props for all managed
                    # data objects of the specified 'type' in the entire
                    # inventory. This gets invoked by vim_util.get_objects.
                    mdo_refs = _db_content[spec_type]
                elif obj_ref.type != spec_type:
                    # This means that we are retrieving props for the managed
                    # data objects in the parent object's 'path' property.
                    # This gets invoked by vim_util.get_inner_objects
                    # eg. obj_ref = <ManagedObjectReference of a cluster>
                    #     type = 'DataStore'
                    #     path = 'datastore'
                    # the above will retrieve all datastores in the given
                    # cluster.
                    parent_mdo = _db_content[obj_ref.type][obj_ref]
                    path = obj.selectSet[0].path
                    mdo_refs = parent_mdo.get(path).ManagedObjectReference
                else:
                    # This means that we are retrieving props of the given
                    # managed data object. This gets invoked by
                    # vim_util.get_properties_for_a_collection_of_objects.
                    mdo_refs = [obj_ref]

                for mdo_ref in mdo_refs:
                    mdo = _db_content[spec_type][mdo_ref]
                    prop_list = []
                    for prop_name in properties:
                        prop = Prop(prop_name, mdo.get(prop_name))
                        prop_list.append(prop)
                    obj_content = ObjectContent(mdo.obj, prop_list)
                    lst_ret_objs.add_object(obj_content)
            except Exception as exc:
                # Skip objects that cannot be resolved, matching the
                # best-effort semantics of the real property collector.
                LOG.exception(exc)
                continue
        return lst_ret_objs

    def _add_port_group(self, method, *args, **kwargs):
        """Adds a port group to the host system."""
        # list() so the first-key lookup also works on Python 3, where
        # dict.keys() returns a non-indexable view (was .keys()[0]).
        _host_sk = list(_db_content["HostSystem"].keys())[0]
        host_mdo = _db_content["HostSystem"][_host_sk]
        host_mdo._add_port_group(kwargs.get("portgrp"))

    def _add_iscsi_send_tgt(self, method, *args, **kwargs):
        """Adds a iscsi send target to the hba."""
        send_targets = kwargs.get('targets')
        host_storage_sys = _get_objects('HostStorageSystem').objects[0]
        iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
                                               '.hostBusAdapter')
        iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
        if hasattr(iscsi_hba, 'configuredSendTarget'):
            iscsi_hba.configuredSendTarget.extend(send_targets)
        else:
            iscsi_hba.configuredSendTarget = send_targets

    def __getattr__(self, attr_name):
        """Dispatches vSphere API method names to the fake handlers.

        Every call except Login first validates the current session.
        """
        if attr_name != "Login":
            self._check_session()
        if attr_name == "Login":
            return lambda *args, **kwargs: self._login()
        elif attr_name == "SessionIsActive":
            return lambda *args, **kwargs: self._session_is_active(
                *args, **kwargs)
        elif attr_name == "TerminateSession":
            return lambda *args, **kwargs: self._terminate_session(
                *args, **kwargs)
        elif attr_name == "CreateVM_Task":
            return lambda *args, **kwargs: self._create_vm(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "ReconfigVM_Task":
            return lambda *args, **kwargs: self._reconfig_vm(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "CreateVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(
                attr_name, kwargs.get("name"))
        elif attr_name == "DeleteDatastoreFile_Task":
            return lambda *args, **kwargs: self._delete_file(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "PowerOnVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "poweredOn")
        elif attr_name == "PowerOffVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "poweredOff")
        elif attr_name == "RebootGuest":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "ResetVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "poweredOn")
        elif attr_name == "SuspendVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "suspended")
        elif attr_name == "CreateSnapshot_Task":
            return lambda *args, **kwargs: self._snapshot_vm(attr_name)
        elif attr_name == "RemoveSnapshot_Task":
            return lambda *args, **kwargs: self._delete_snapshot(
                attr_name, *args, **kwargs)
        elif attr_name == "CopyVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(
                attr_name, kwargs.get("destName"))
        elif attr_name == "ExtendVirtualDisk_Task":
            return lambda *args, **kwargs: self._extend_disk(
                attr_name, kwargs.get("size"))
        elif attr_name == "Destroy_Task":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args, **kwargs)
        elif attr_name == "UnregisterVM":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args, **kwargs)
        elif attr_name == "CloneVM_Task":
            return lambda *args, **kwargs: self._clone_vm(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "FindAllByUuid":
            return lambda *args, **kwargs: self._find_all_by_uuid(
                attr_name, *args, **kwargs)
        elif attr_name == "SearchDatastore_Task":
            return lambda *args, **kwargs: self._search_ds(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "MoveDatastoreFile_Task":
            return lambda *args, **kwargs: self._move_file(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "MakeDirectory":
            return lambda *args, **kwargs: self._make_dir(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "RetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties(
                attr_name, *args, **kwargs)
        elif attr_name == "ContinueRetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties_continue(
                attr_name, *args, **kwargs)
        elif attr_name == "CancelRetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties_cancel(
                attr_name, *args, **kwargs)
        elif attr_name == "AddPortGroup":
            return lambda *args, **kwargs: self._add_port_group(attr_name,
                                                                *args, **kwargs)
        elif attr_name == "RebootHost_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "ShutdownHost_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "PowerUpHostFromStandBy_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "EnterMaintenanceMode_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "ExitMaintenanceMode_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "AddInternetScsiSendTargets":
            return lambda *args, **kwargs: self._add_iscsi_send_tgt(
                attr_name, *args, **kwargs)
        elif attr_name == "RescanHba":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
diff --git a/nova/tests/unit/virt/vmwareapi/stubs.py b/nova/tests/unit/virt/vmwareapi/stubs.py
new file mode 100644
index 0000000000..d126b36e0f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/stubs.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts for the test suite
+"""
+
+import contextlib
+
+import mock
+from oslo.vmware import exceptions as vexc
+
+from nova import db
+from nova.tests.unit import test_flavors
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import network_util
+
+
def fake_get_vim_object(arg):
    """Stub for VMwareAPISession's get_vim_object method.

    *arg* is accepted for signature compatibility and ignored; a fresh
    FakeVim is handed back on every call.
    """
    fake_vim = fake.FakeVim()
    return fake_vim
+
+
@property
def fake_vim_prop(arg):
    """Property stub for VMwareAPISession's vim attribute access."""
    vim_obj = fake.get_fake_vim_object(arg)
    return vim_obj
+
+
def fake_is_vim_object(arg, module):
    """Stub for VMwareAPISession's is_vim_object method.

    True exactly when *module* is a FakeVim; *arg* (the session) is unused.
    """
    looks_like_vim = isinstance(module, fake.FakeVim)
    return looks_like_vim
+
+
def fake_temp_method_exception():
    """Raise the fault VMware emits for an unauthenticated session."""
    fault_list = [vexc.NOT_AUTHENTICATED]
    raise vexc.VimFaultException(fault_list,
                                 "Session Empty/Not Authenticated")
+
+
def fake_temp_session_exception():
    """Raise a connection-level failure, as when vCenter is unreachable."""
    raise vexc.VimConnectionException(
        "it's a fake!", "Session Exception")
+
+
def fake_session_file_exception():
    """Raise the fault produced when a datastore file already exists."""
    raise vexc.VimFaultException([vexc.FILE_ALREADY_EXISTS],
                                 Exception('fake'))
+
+
def fake_session_permission_exception():
    """Raise a NoPermission fault carrying privilege/object details."""
    fault_details = {'privilegeId': 'Resource.AssignVMToPool',
                     'object': 'domain-c7'}
    raise vexc.VimFaultException(
        [vexc.NO_PERMISSION],
        'Permission to perform this operation was denied.',
        details=fault_details)
+
+
def _fake_flavor_get(context, id):
    """Stub for db.flavor_get: look *id* up among the canned default flavors.

    Falls back to a synthetic m1.micro-style record when nothing matches.
    """
    fallback = {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
                'name': 'm1.micro', 'deleted': 0, 'created_at': None,
                'ephemeral_gb': 0, 'updated_at': None,
                'disabled': False, 'vcpus': 1, 'extra_specs': {},
                'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
                'flavorid': '1', 'vcpu_weight': None, 'id': 2}
    return next((flavor for flavor in test_flavors.DEFAULT_FLAVORS
                 if flavor['id'] == id), fallback)
+
+
def set_stubs(stubs):
    """Install all vmwareapi test stubs onto *stubs* (a stubout registry)."""
    replacements = (
        (network_util, 'get_network_with_the_name', fake.fake_get_network),
        (images, 'upload_image', fake.fake_upload_image),
        (images, 'fetch_image', fake.fake_fetch_image),
        (driver.VMwareAPISession, 'vim', fake_vim_prop),
        (driver.VMwareAPISession, '_is_vim_object', fake_is_vim_object),
        (db, 'flavor_get', _fake_flavor_get),
    )
    for target, attr, replacement in replacements:
        stubs.Set(target, attr, replacement)
+
+
def fake_suds_context(calls=None):
    """Generate a suds client which automatically mocks all SOAP method calls.

    Calls are stored in <calls>, indexed by the name of the call. If you need
    to mock the behaviour of specific API calls you can pre-populate <calls>
    with appropriate Mock objects.
    """
    # Only substitute a fresh dict when the caller supplied nothing at all.
    # The previous `calls = calls or {}` also threw away a caller-provided
    # *empty* dict, breaking callers that pass one in precisely so they can
    # inspect the recorded calls afterwards.
    if calls is None:
        calls = {}

    class fake_factory:
        def create(self, name):
            # Factory products are data objects, hence non-callable.
            return mock.NonCallableMagicMock(name=name)

    class fake_service:
        def __getattr__(self, attr_name):
            # Reuse a previously created (or pre-populated) mock so repeated
            # access to the same SOAP method shares one call record.
            if attr_name in calls:
                return calls[attr_name]

            mock_call = mock.MagicMock(name=attr_name)
            calls[attr_name] = mock_call
            return mock_call

    class fake_client:
        # NOTE: parameter was misspelled "wdsl_url"; it is internal-only
        # (suds.client.Client is always constructed positionally here).
        def __init__(self, wsdl_url, **kwargs):
            self.service = fake_service()
            self.factory = fake_factory()

    return contextlib.nested(
        mock.patch('suds.client.Client', fake_client),

        # As we're not connecting to a real host there's no need to wait
        # between retries
        mock.patch.object(driver, 'TIME_BETWEEN_API_CALL_RETRIES', 0)
    )
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
new file mode 100644
index 0000000000..7b4b1bba1f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -0,0 +1,168 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+import mox
+
+from nova import context
+from nova.image import glance
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt import fake
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+
+
class ConfigDriveTestCase(test.NoDBTestCase):
    """Verify config-drive creation/attachment via VMwareVCDriver.spawn."""

    # These tests spawn VMs against the shared fake backend; serialize them.
    REQUIRES_LOCKING = True

    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register):
        """Build a VC driver on the fake vSphere backend plus test fixtures.

        _register_openstack_extension is patched out since the fake backend
        does not implement it.
        """
        super(ConfigDriveTestCase, self).setUp()
        vm_util.vm_refs_cache_reset()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        cluster_name = 'test_cluster'
        self.flags(cluster_name=[cluster_name],
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   use_linked_clone=False, group='vmware')
        self.flags(vnc_enabled=False)
        vmwareapi_fake.reset()
        stubs.set_stubs(self.stubs)
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        # NOTE(review): the FakeVirtAPI *class* (not an instance) is passed
        # here -- confirm whether fake.FakeVirtAPI() was intended.
        self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
        self.network_info = utils.get_test_network_info()
        # keys()[0] indexing works on Python 2 only (dict views on 3).
        self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0],
                                     cluster_name)
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        instance_values = {
            'vm_state': 'building',
            'project_id': 'fake',
            'user_id': 'fake',
            'name': '1',
            'kernel_id': '1',
            'ramdisk_id': '1',
            'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
            'memory_mb': 8192,
            'flavor': 'm1.large',
            'instance_type_id': 0,
            'vcpus': 4,
            'root_gb': 80,
            'image_ref': image_ref,
            'host': 'fake_host',
            'task_state': 'scheduling',
            'reservation_id': 'r-3t8muvr0',
            'id': 1,
            'uuid': 'fake-uuid',
            'node': self.node_name,
            'metadata': [],
            'expected_attrs': ['system_metadata'],
        }
        self.test_instance = fake_instance.fake_instance_obj(self.context,
                                                             **instance_values)

        # NOTE(review): `context` below is the imported nova.context module,
        # not self.context -- the fake image service ignores it, but confirm
        # whether self.context was intended.
        (image_service, image_id) = glance.get_remote_image_service(context,
                                    image_ref)
        metadata = image_service.show(context, image_id)
        self.image = {
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        }

        class FakeInstanceMetadata(object):
            # Minimal InstanceMetadata replacement: no metadata content.
            def __init__(self, instance, content=None, extra_md=None):
                pass

            def metadata_for_config_drive(self):
                return []

        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.metadata.base.InstanceMetadata',
            FakeInstanceMetadata))

        def fake_make_drive(_self, _path):
            pass
        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_make_drive)

        def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
            pass
        self.stubs.Set(images,
                       'upload_iso_to_datastore',
                       fake_upload_iso_to_datastore)

    def tearDown(self):
        """Reset the fake vSphere backend and fake image service."""
        super(ConfigDriveTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()

    def _spawn_vm(self, injected_files=None, admin_password=None,
                  block_device_info=None):
        """Spawn self.test_instance through the driver under test."""

        injected_files = injected_files or []
        self.conn.spawn(self.context, self.test_instance, self.image,
                        injected_files=injected_files,
                        admin_password=admin_password,
                        network_info=self.network_info,
                        block_device_info=block_device_info)

    def test_create_vm_with_config_drive_verify_method_invocation(self):
        """Spawn with config_drive set must build and attach the ISO."""
        self.test_instance.config_drive = 'True'
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
        self.conn._vmops._create_config_drive(self.test_instance,
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg()
                                              ).AndReturn('[ds1] fake.iso')
        self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg())
        self.mox.ReplayAll()
        # if spawn does not call the _create_config_drive or
        # _attach_cdrom_to_vm call with the correct set of parameters
        # then mox's VerifyAll will throw a Expected methods never called
        # Exception
        self._spawn_vm()

    def test_create_vm_without_config_drive(self):
        """Spawn without config_drive must not touch config-drive paths."""
        self.test_instance.config_drive = None
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
        self.mox.ReplayAll()
        # if spawn ends up calling _create_config_drive or
        # _attach_cdrom_to_vm then mox will log a Unexpected method call
        # exception
        self._spawn_vm()

    def test_create_vm_with_config_drive(self):
        """End-to-end spawn with config drive enabled must succeed."""
        self.test_instance.config_drive = 'True'
        self._spawn_vm()
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
new file mode 100644
index 0000000000..5f7eb76a62
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -0,0 +1,2650 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMwareAPI.
+"""
+
+import collections
+import contextlib
+import copy
+import datetime
+
+from eventlet import greenthread
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+from oslo.vmware import pbm
+from oslo.vmware import vim
+from oslo.vmware import vim_util as oslo_vim_util
+import suds
+
+from nova import block_device
+from nova.compute import api as compute_api
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import exception
+from nova.image import glance
+from nova.network import model as network_model
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova.tests.unit import test_flavors
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova import utils as nova_utils
+from nova.virt import driver as v_driver
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vif
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+from nova.virt.vmwareapi import volumeops
+
+CONF = cfg.CONF
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('remove_unused_original_minimum_age_seconds',
+ 'nova.virt.imagecache')
+
+
class fake_vm_ref(object):
    """Minimal stand-in for a VirtualMachine managed-object reference."""

    def __init__(self):
        # Mirror the two attributes code under test reads from a real moref.
        self._type = 'VirtualMachine'
        self.value = 4
+
+
class fake_service_content(object):
    """Stand-in for a RetrieveServiceContent reply wrapper."""

    def __init__(self):
        content = vmwareapi_fake.DataObject()
        content.fake = 'fake'
        self.ServiceContent = content
+
+
class VMwareSudsTest(test.NoDBTestCase):
    """Behaviour of a Vim object backed by a stubbed suds client."""

    def setUp(self):
        super(VMwareSudsTest, self).setUp()

        def new_client_init(self, url, **kwargs):
            # Swallow suds.client.Client.__init__ so no SOAP/WSDL network
            # access happens while constructing the Vim object.
            return

        mock.patch.object(suds.client.Client,
                          '__init__', new=new_client_init).start()
        self.vim = self._vim_create()
        self.addCleanup(mock.patch.stopall)

    def _mock_getattr(self, attr_name):
        # Vim construction is expected to look up exactly one SOAP method.
        self.assertEqual("RetrieveServiceContent", attr_name)
        return lambda obj, **kwargs: fake_service_content()

    def _vim_create(self):
        # Patch attribute lookup only for the duration of construction.
        with mock.patch.object(vim.Vim, '__getattr__', self._mock_getattr):
            return vim.Vim()

    def test_exception_with_deepcopy(self):
        """deepcopy of a Vim must raise rather than clone the connection."""
        self.assertIsNotNone(self.vim)
        self.assertRaises(vexc.VimException,
                          copy.deepcopy, self.vim)
+
+
def _fake_create_session(inst):
    """Attach a canned, already-authenticated fake session to *inst*."""
    fake_session = vmwareapi_fake.DataObject()
    fake_session.key = 'fake_key'
    fake_session.userName = 'fake_username'
    fake_session._pbm_wsdl_loc = None
    fake_session._pbm = None
    inst._session = fake_session
+
+
class VMwareDriverStartupTestCase(test.NoDBTestCase):
    """Driver construction must fail fast on incomplete vmware config."""

    def _start_driver_with_flags(self, expected_exception_type, startup_flags):
        """Construct the driver under *startup_flags*; assert the exact
        exception type raised (assertIs on type, not a subclass check).
        """
        self.flags(**startup_flags)
        with mock.patch(
                'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
            e = self.assertRaises(
                Exception, driver.VMwareVCDriver, None)  # noqa
            self.assertIs(type(e), expected_exception_type)

    def test_start_driver_no_user(self):
        """Missing host_username aborts startup."""
        self._start_driver_with_flags(
                Exception,
                dict(host_ip='ip', host_password='password',
                     group='vmware'))

    def test_start_driver_no_host(self):
        """Missing host_ip aborts startup."""
        self._start_driver_with_flags(
                Exception,
                dict(host_username='username', host_password='password',
                     group='vmware'))

    def test_start_driver_no_password(self):
        """Missing host_password aborts startup."""
        self._start_driver_with_flags(
                Exception,
                dict(host_ip='ip', host_username='username',
                     group='vmware'))

    def test_start_driver_with_user_host_password(self):
        # Getting the InvalidInput exception signifies that no exception
        # is raised regarding missing user/password/host
        self._start_driver_with_flags(
                nova.exception.InvalidInput,
                dict(host_ip='ip', host_password='password',
                     host_username="user", datastore_regex="bad(regex",
                     group='vmware'))
+
+
class VMwareSessionTestCase(test.NoDBTestCase):
    """_call_method argument forwarding for vim vs. non-vim modules."""

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=False)
    def test_call_method(self, mock_is_vim):
        """Non-vim modules get the session's vim appended to the call."""
        with contextlib.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            session._vim = mock.Mock()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira', session._vim)

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=True)
    def test_call_method_vim(self, mock_is_vim):
        """Vim modules are invoked without an extra vim argument."""
        with contextlib.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira')
+
+
+class VMwareAPIVMTestCase(test.NoDBTestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ REQUIRES_LOCKING = True
+
    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register, create_connection=True):
        """Wire a two-cluster VC driver up to the fake vSphere backend.

        _register_openstack_extension is patched out because the fake
        backend does not implement it.
        """
        super(VMwareAPIVMTestCase, self).setUp()
        vm_util.vm_refs_cache_reset()
        # NOTE(review): self.context is re-assigned a few lines below with
        # self.user_id/self.project_id; this first assignment looks redundant.
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        cluster_name = 'test_cluster'
        cluster_name2 = 'test_cluster2'
        self.flags(cluster_name=[cluster_name, cluster_name2],
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   api_retry_count=1,
                   use_linked_clone=False, group='vmware')
        self.flags(vnc_enabled=False,
                   image_cache_subdirectory_name='vmware_base',
                   my_ip='')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        stubs.set_stubs(self.stubs)
        vmwareapi_fake.reset()
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        if create_connection:
            self.conn = driver.VMwareVCDriver(None, False)
            self._set_exception_vars()
            # keys()[0]/[1] indexing is Python 2 only (dict views on 3).
            self.node_name = self.conn._resources.keys()[0]
            self.node_name2 = self.conn._resources.keys()[1]
            if cluster_name2 in self.node_name2:
                self.ds = 'ds1'
            else:
                self.ds = 'ds2'

        self.vim = vmwareapi_fake.FakeVim()

        # NOTE(vish): none of the network plugging code is actually
        # being tested
        self.network_info = utils.get_test_network_info()
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        (image_service, image_id) = glance.get_remote_image_service(
            self.context, image_ref)
        metadata = image_service.show(self.context, image_id)
        # Minimal image meta in the shape spawn() expects.
        self.image = {
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        }
        self.fake_image_uuid = self.image['id']
        # NOTE(review): the image service was already stubbed above; this
        # second stub_out_image_service call appears redundant -- confirm.
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        self.vnc_host = 'ha-host'
        self.instance_without_compute = {'node': None,
                                         'vm_state': 'building',
                                         'project_id': 'fake',
                                         'user_id': 'fake',
                                         'name': '1',
                                         'display_description': '1',
                                         'kernel_id': '1',
                                         'ramdisk_id': '1',
                                         'mac_addresses': [
                                             {'address': 'de:ad:be:ef:be:ef'}
                                         ],
                                         'memory_mb': 8192,
                                         'instance_type': 'm1.large',
                                         'vcpus': 4,
                                         'root_gb': 80,
                                         'image_ref': self.image['id'],
                                         'host': 'fake_host',
                                         'task_state':
                                         'scheduling',
                                         'reservation_id': 'r-3t8muvr0',
                                         'id': 1,
                                         'uuid': 'fake-uuid',
                                         'metadata': []}
+
    def tearDown(self):
        """Reset the fake vSphere backend and the fake image service."""
        super(VMwareAPIVMTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_get_host_ip_addr(self):
+ self.assertEqual('test_url', self.conn.get_host_ip_addr())
+
+ def test_init_host_with_no_session(self):
+ self.conn._session = mock.Mock()
+ self.conn._session.vim = None
+ self.conn.init_host('fake_host')
+ self.conn._session._create_session.assert_called_once_with()
+
+ def test_init_host(self):
+ try:
+ self.conn.init_host("fake_host")
+ except Exception as ex:
+ self.fail("init_host raised: %s" % ex)
+
    def _set_exception_vars(self):
        """Snapshot session entry points and reset exception bookkeeping.

        Fault-injection tests wrap self.wait_task/self.call_method and use
        self.task_ref/self.exception to record what happened.
        """
        self.wait_task = self.conn._session._wait_for_task
        self.call_method = self.conn._session._call_method
        self.task_ref = None
        self.exception = False
+
+ def test_cleanup_host(self):
+ self.conn.init_host("fake_host")
+ try:
+ self.conn.cleanup_host("fake_host")
+ except Exception as ex:
+ self.fail("cleanup_host raised: %s" % ex)
+
    @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
    def test_cleanup_host_direct(self, mock_init):
        """cleanup_host logs the session out via vCenter's sessionManager."""
        mock_init.return_value = None
        vcdriver = driver.VMwareVCDriver(None, False)
        vcdriver._session = mock.Mock()
        vcdriver.cleanup_host("foo")
        vcdriver._session.vim.client.service.Logout.assert_called_once_with(
            vcdriver._session.vim.service_content.sessionManager
        )
+
    @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
    def test_cleanup_host_direct_with_bad_logout(self, mock_init):
        """A suds WebFault from Logout is swallowed; cleanup_host not raise."""
        mock_init.return_value = None
        vcdriver = driver.VMwareVCDriver(None, False)
        vcdriver._session = mock.Mock()
        fault = suds.WebFault(mock.Mock(), mock.Mock())
        vcdriver._session.vim.client.service.Logout.side_effect = fault
        vcdriver.cleanup_host("foo")
+
+ def test_driver_capabilities(self):
+ self.assertTrue(self.conn.capabilities['has_imagecache'])
+ self.assertFalse(self.conn.capabilities['supports_recreate'])
+
    def test_configuration_linked_clone(self):
        """An unset use_linked_clone flag must fail configuration checks."""
        self.flags(use_linked_clone=None, group='vmware')
        self.assertRaises(vexc.UseLinkedCloneConfigurationFault,
                          self.conn._validate_configuration)
+
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm(self, get_profile_mock):
        """PBM validation passes when the default policy resolves."""
        get_profile_mock.return_value = 'fake-profile'
        self.flags(pbm_enabled=True,
                   pbm_default_policy='fake-policy',
                   pbm_wsdl_location='fake-location', group='vmware')
        self.conn._validate_configuration()
+
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm_bad_default(self, get_profile_mock):
        """PBM validation fails when the default policy cannot be resolved."""
        get_profile_mock.return_value = None
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fake-location',
                   pbm_default_policy='fake-policy', group='vmware')
        self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
                          self.conn._validate_configuration)
+
    def test_login_retries(self):
        """A transient connection failure during login is retried."""
        self.attempts = 0
        self.login_session = vmwareapi_fake.FakeVim()._login()

        def _fake_login(_self):
            # First attempt fails with a connection error; second succeeds.
            self.attempts += 1
            if self.attempts == 1:
                raise vexc.VimConnectionException('Here is my fake exception')
            return self.login_session

        def _fake_check_session(_self):
            return True

        self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
        self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
                       _fake_check_session)

        # Suppress the retry backoff sleep to keep the test fast.
        with mock.patch.object(greenthread, 'sleep'):
            self.conn = driver.VMwareAPISession()
        self.assertEqual(self.attempts, 2)
+
+ def _get_instance_type_by_name(self, type):
+ for instance_type in test_flavors.DEFAULT_FLAVORS:
+ if instance_type['name'] == type:
+ return instance_type
+ if type == 'm1.micro':
+ return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
+ 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
+ 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1, 'extra_specs': {},
+ 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
+ 'flavorid': '1', 'vcpu_weight': None, 'id': 2}
+
    def _create_instance(self, node=None, set_image_ref=True,
                         uuid=None, instance_type='m1.large'):
        """Build a fake Instance object and stash it (and its uuid/node).

        Side effects: sets self.type_data, self.instance_node, self.uuid
        and self.instance for use by the assertion helpers.
        """
        if not node:
            node = self.node_name
        if not uuid:
            uuid = uuidutils.generate_uuid()
        self.type_data = self._get_instance_type_by_name(instance_type)
        values = {'name': 'fake_name',
                  'id': 1,
                  'uuid': uuid,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'kernel_id': "fake_kernel_uuid",
                  'ramdisk_id': "fake_ramdisk_uuid",
                  'mac_address': "de:ad:be:ef:be:ef",
                  'flavor': instance_type,
                  'node': node,
                  'memory_mb': self.type_data['memory_mb'],
                  'root_gb': self.type_data['root_gb'],
                  'ephemeral_gb': self.type_data['ephemeral_gb'],
                  'vcpus': self.type_data['vcpus'],
                  'swap': self.type_data['swap'],
                  'expected_attrs': ['system_metadata'],
                  }
        if set_image_ref:
            values['image_ref'] = self.fake_image_uuid
        self.instance_node = node
        self.uuid = uuid
        self.instance = fake_instance.fake_instance_obj(
            self.context, **values)
+
    def _create_vm(self, node=None, num_instances=1, uuid=None,
                   instance_type='m1.large', powered_on=True):
        """Create and spawn the VM, then verify the fake backend's record.

        Also asserts the vm_ref cache transitions from miss to hit across
        the spawn.
        """
        if not node:
            node = self.node_name
        self._create_instance(node=node, uuid=uuid,
                              instance_type=instance_type)
        self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=None)
        self._check_vm_record(num_instances=num_instances,
                              powered_on=powered_on)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def _get_vm_record(self):
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ for vm in vms.objects:
+ if vm.get('name') == self.uuid:
+ return vm
+ self.fail('Unable to find VM backing!')
+
    def _check_vm_record(self, num_instances=1, powered_on=True):
        """Check if the spawned VM's properties correspond to the instance in
        the db.
        """
        instances = self.conn.list_instances()
        self.assertEqual(len(instances), num_instances)

        # Get Nova record for VM
        vm_info = self.conn.get_info({'uuid': self.uuid,
                                      'name': 1,
                                      'node': self.instance_node})

        vm = self._get_vm_record()

        # Check that m1.large above turned into the right thing.
        # NOTE: long() is Python 2 only.
        mem_kib = long(self.type_data['memory_mb']) << 10
        vcpus = self.type_data['vcpus']
        self.assertEqual(vm_info['max_mem'], mem_kib)
        self.assertEqual(vm_info['mem'], mem_kib)
        self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
        self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
        self.assertEqual(vm.get("summary.config.memorySizeMB"),
                         self.type_data['memory_mb'])

        # Device index 2 is expected to be the e1000 NIC created by spawn.
        self.assertEqual(
            vm.get("config.hardware.device").VirtualDevice[2].obj_name,
            "ns0:VirtualE1000")
        if powered_on:
            # Check that the VM is running according to Nova
            self.assertEqual(power_state.RUNNING, vm_info['state'])

            # Check that the VM is running according to vSphere API.
            self.assertEqual('poweredOn', vm.get("runtime.powerState"))
        else:
            # Check that the VM is not running according to Nova
            self.assertEqual(power_state.SHUTDOWN, vm_info['state'])

            # Check that the VM is not running according to vSphere API.
            self.assertEqual('poweredOff', vm.get("runtime.powerState"))

        # The NVP/neutron integration keys must have been written into
        # the VM's extraConfig.
        found_vm_uuid = False
        found_iface_id = False
        extras = vm.get("config.extraConfig")
        for c in extras.OptionValue:
            if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
                found_vm_uuid = True
            if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
                found_iface_id = True

        self.assertTrue(found_vm_uuid)
        self.assertTrue(found_iface_id)
+
    def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
        """Check if the get_info returned values correspond to the instance
        object in the db.
        """
        # NOTE: long() is Python 2 only.
        mem_kib = long(self.type_data['memory_mb']) << 10
        self.assertEqual(info["state"], pwr_state)
        self.assertEqual(info["max_mem"], mem_kib)
        self.assertEqual(info["mem"], mem_kib)
        self.assertEqual(info["num_cpu"], self.type_data['vcpus'])
+
+ def test_instance_exists(self):
+ self._create_vm()
+ self.assertTrue(self.conn.instance_exists(self.instance))
+ invalid_instance = dict(uuid='foo', name='bar', node=self.node_name)
+ self.assertFalse(self.conn.instance_exists(invalid_instance))
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+
+ def test_list_instance_uuids(self):
+ self._create_vm()
+ uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), 1)
+
+ def test_list_instance_uuids_invalid_uuid(self):
+ self._create_vm(uuid='fake_id')
+ uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), 0)
+
+ def _cached_files_exist(self, exists=True):
+ cache = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
+ if exists:
+ self.assertTrue(vmwareapi_fake.get_file(str(cache)))
+ else:
+ self.assertFalse(vmwareapi_fake.get_file(str(cache)))
+
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_instance_dir_disk_created(self, mock_from_image):
        """Test image file is cached when even when use_linked_clone
        is False
        """
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            linked_clone=False)

        mock_from_image.return_value = img_props
        self._create_vm()
        # Full (non-linked) disk must exist in the instance directory...
        path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        self.assertTrue(vmwareapi_fake.get_file(str(path)))
        # ...and the image must still have been placed in the cache.
        self._cached_files_exist()
+
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_cache_dir_disk_created(self, mock_from_image):
        """Test image disk is cached when use_linked_clone is True."""
        self.flags(use_linked_clone=True, group='vmware')

        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1 * units.Ki,
            disk_type=constants.DISK_TYPE_SPARSE)

        mock_from_image.return_value = img_props

        self._create_vm()
        # Both the base image and the 80GB-resized linked-clone root
        # must land in the cache directory.
        path = ds_util.DatastorePath(self.ds, 'vmware_base',
                                     self.fake_image_uuid,
                                     '%s.vmdk' % self.fake_image_uuid)
        root = ds_util.DatastorePath(self.ds, 'vmware_base',
                                     self.fake_image_uuid,
                                     '%s.80.vmdk' % self.fake_image_uuid)
        self.assertTrue(vmwareapi_fake.get_file(str(path)))
        self.assertTrue(vmwareapi_fake.get_file(str(root)))
+
    def _iso_disk_type_created(self, instance_type='m1.large'):
        """Spawn from an iso image and assert the iso reaches the cache."""
        self.image['disk_format'] = 'iso'
        self._create_vm(instance_type=instance_type)
        path = ds_util.DatastorePath(self.ds, 'vmware_base',
                                     self.fake_image_uuid,
                                     '%s.iso' % self.fake_image_uuid)
        self.assertTrue(vmwareapi_fake.get_file(str(path)))
+
    def test_iso_disk_type_created(self):
        """An iso boot with nonzero root_gb also creates a blank root vmdk."""
        self._iso_disk_type_created()
        path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        self.assertTrue(vmwareapi_fake.get_file(str(path)))
+
    def test_iso_disk_type_created_with_root_gb_0(self):
        """An iso boot with root_gb=0 must not create a root vmdk."""
        self._iso_disk_type_created(instance_type='m1.micro')
        path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        self.assertFalse(vmwareapi_fake.get_file(str(path)))
+
    def test_iso_disk_cdrom_attach(self):
        """Spawning from an iso attaches the cached iso as a cdrom."""
        iso_path = ds_util.DatastorePath(self.ds, 'vmware_base',
                                         self.fake_image_uuid,
                                         '%s.iso' % self.fake_image_uuid)

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            # Capture the attach and assert it targets the cached iso.
            self.assertEqual(iso_uploaded_path, str(iso_path))

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.image['disk_format'] = 'iso'
        self._create_vm()
+
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_iso_disk_cdrom_attach_with_config_drive(self,
                                                     mock_from_image):
        """iso boot + forced config drive attaches two cdroms, in order."""
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=80 * units.Gi,
            file_type='iso',
            linked_clone=False)

        mock_from_image.return_value = img_props

        self.flags(force_config_drive=True)
        # Expected attach order: boot iso first, config drive second.
        iso_path = [
            ds_util.DatastorePath(self.ds, 'vmware_base',
                                  self.fake_image_uuid,
                                  '%s.iso' % self.fake_image_uuid),
            ds_util.DatastorePath(self.ds, 'fake-config-drive')]
        self.iso_index = 0

        def fake_create_config_drive(instance, injected_files, password,
                                     data_store_name, folder, uuid, cookies):
            return 'fake-config-drive'

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
            self.iso_index += 1

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.stubs.Set(self.conn._vmops, '_create_config_drive',
                       fake_create_config_drive)

        self.image['disk_format'] = 'iso'
        self._create_vm()
        self.assertEqual(self.iso_index, 2)
+
    def test_cdrom_attach_with_config_drive(self):
        """A vmdk boot with forced config drive attaches it as a cdrom."""
        self.flags(force_config_drive=True)

        iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive')
        self.cd_attach_called = False

        def fake_create_config_drive(instance, injected_files, password,
                                     data_store_name, folder, uuid, cookies):
            return 'fake-config-drive'

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path))
            self.cd_attach_called = True

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.stubs.Set(self.conn._vmops, '_create_config_drive',
                       fake_create_config_drive)

        self._create_vm()
        self.assertTrue(self.cd_attach_called)
+
    def test_spawn(self):
        """A plain spawn produces a RUNNING VM."""
        self._create_vm()
        info = self.conn.get_info({'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_spawn_vm_ref_cached(self):
+        """Spawning populates the vm_util VM-reference cache for the uuid."""
+        uuid = uuidutils.generate_uuid()
+        self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
+        self._create_vm(uuid=uuid)
+        self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))
+
+    def _spawn_power_state(self, power_on):
+        """Spawn with an explicit power_on flag and verify resulting state.
+
+        Wraps vmops.spawn so that the power_on argument is forced to the
+        given value, then checks the VM is RUNNING or SHUTDOWN accordingly.
+        """
+        self._spawn = self.conn._vmops.spawn
+        self._power_on = power_on
+
+        def _fake_spawn(context, instance, image_meta, injected_files,
+                        admin_password, network_info, block_device_info=None,
+                        instance_name=None, power_on=True):
+            # Delegate to the real spawn, overriding only power_on.
+            return self._spawn(context, instance, image_meta,
+                               injected_files, admin_password, network_info,
+                               block_device_info=block_device_info,
+                               instance_name=instance_name,
+                               power_on=self._power_on)
+
+        with (
+            mock.patch.object(self.conn._vmops, 'spawn', _fake_spawn)
+        ):
+            self._create_vm(powered_on=power_on)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        if power_on:
+            self._check_vm_info(info, power_state.RUNNING)
+        else:
+            self._check_vm_info(info, power_state.SHUTDOWN)
+
+    def test_spawn_no_power_on(self):
+        """Spawn with power_on=False leaves the VM SHUTDOWN."""
+        self._spawn_power_state(False)
+
+    def test_spawn_power_on(self):
+        """Spawn with power_on=True leaves the VM RUNNING."""
+        self._spawn_power_state(True)
+
+    def test_spawn_root_size_0(self):
+        """Spawn with a 0 GB root disk flavor (m1.micro).
+
+        Only the base cached vmdk should exist; no resized ('.0.vmdk')
+        copy should be created in the image cache.
+        """
+        self._create_vm(instance_type='m1.micro')
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        cache = ('[%s] vmware_base/%s/%s.vmdk' %
+                 (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+        gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
+                    (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+        self.assertTrue(vmwareapi_fake.get_file(cache))
+        self.assertFalse(vmwareapi_fake.get_file(gb_cache))
+
+    def _spawn_with_delete_exception(self, fault=None):
+        """Spawn while DeleteDatastoreFile_Task fails with *fault*.
+
+        With a known file fault the spawn still succeeds; with no fault
+        (a generic task error) VMwareDriverException is expected.
+        """
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "DeleteDatastoreFile_Task":
+                # Record that the failing path was actually exercised.
+                self.exception = True
+                task_mdo = vmwareapi_fake.create_task(method, "error",
+                        error_fault=fault)
+                return task_mdo.obj
+            return task_ref
+
+        with (
+            mock.patch.object(self.conn._session, '_call_method',
+                              fake_call_method)
+        ):
+            if fault:
+                self._create_vm()
+                info = self.conn.get_info({'uuid': self.uuid,
+                                           'node': self.instance_node})
+                self._check_vm_info(info, power_state.RUNNING)
+            else:
+                self.assertRaises(vexc.VMwareDriverException, self._create_vm)
+            self.assertTrue(self.exception)
+
+    def test_spawn_with_delete_exception_not_found(self):
+        """FileNotFound on delete is tolerated during spawn."""
+        self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
+
+    def test_spawn_with_delete_exception_file_fault(self):
+        """FileFault on delete is tolerated during spawn."""
+        self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
+
+    def test_spawn_with_delete_exception_cannot_delete_file(self):
+        """CannotDeleteFile on delete is tolerated during spawn."""
+        self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
+
+    def test_spawn_with_delete_exception_file_locked(self):
+        """FileLocked on delete is tolerated during spawn."""
+        self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
+
+    def test_spawn_with_delete_exception_general(self):
+        """A generic delete-task error makes spawn fail."""
+        self._spawn_with_delete_exception()
+
+    def test_spawn_disk_extend(self):
+        """Spawn extends the root disk to the flavor's requested size."""
+        self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
+        requested_size = 80 * units.Mi
+        self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
+                requested_size, mox.IgnoreArg(), mox.IgnoreArg())
+        self.mox.ReplayAll()
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_spawn_disk_extend_exists(self):
+        """Spawn succeeds when the extended cache file already exists."""
+        root = ds_util.DatastorePath(self.ds, 'vmware_base',
+                                     self.fake_image_uuid,
+                                     '%s.80.vmdk' % self.fake_image_uuid)
+
+        def _fake_extend(instance, requested_size, name, dc_ref):
+            # Simulate the extend by creating the target file directly.
+            vmwareapi_fake._add_file(str(root))
+
+        self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
+                       _fake_extend)
+
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.assertTrue(vmwareapi_fake.get_file(str(root)))
+
+    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+                       'from_image')
+    def test_spawn_disk_extend_sparse(self, mock_from_image):
+        """A sparse linked-clone image is extended to the instance root size."""
+        img_props = images.VMwareImage(
+            image_id=self.fake_image_uuid,
+            file_size=units.Ki,
+            disk_type=constants.DISK_TYPE_SPARSE,
+            linked_clone=True)
+
+        mock_from_image.return_value = img_props
+
+        with contextlib.nested(
+            mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+            mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
+        ) as (mock_extend, mock_get_dc):
+            dc_val = mock.Mock()
+            dc_val.ref = "fake_dc_ref"
+            dc_val.name = "dc1"
+            mock_get_dc.return_value = dc_val
+            self._create_vm()
+            iid = img_props.image_id
+            cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+                                                 iid, '%s.80.vmdk' % iid)
+            # Extend must target the cached image at the requested size.
+            mock_extend.assert_called_once_with(
+                    self.instance, self.instance.root_gb * units.Mi,
+                    str(cached_image), "fake_dc_ref")
+
+    def test_spawn_disk_extend_failed_copy(self):
+        # Spawn instance
+        # copy for extend fails without creating a file
+        #
+        # Expect the copy error to be raised
+        self.flags(use_linked_clone=True, group='vmware')
+
+        CopyError = vexc.FileFaultException
+
+        def fake_wait_for_task(task_ref):
+            # Fail only the copy task; all other tasks complete normally.
+            if task_ref == 'fake-copy-task':
+                raise CopyError('Copy failed!')
+            return self.wait_task(task_ref)
+
+        def fake_call_method(module, method, *args, **kwargs):
+            if method == "CopyVirtualDisk_Task":
+                return 'fake-copy-task'
+
+            return self.call_method(module, method, *args, **kwargs)
+
+        with contextlib.nested(
+                mock.patch.object(self.conn._session, '_call_method',
+                                  new=fake_call_method),
+                mock.patch.object(self.conn._session, '_wait_for_task',
+                                  new=fake_wait_for_task)):
+            self.assertRaises(CopyError, self._create_vm)
+
+    def test_spawn_disk_extend_failed_partial_copy(self):
+        # Spawn instance
+        # Copy for extend fails, leaving a file behind
+        #
+        # Expect the file to be cleaned up
+        # Expect the copy error to be raised
+        self.flags(use_linked_clone=True, group='vmware')
+        self.task_ref = None
+        uuid = self.fake_image_uuid
+        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
+                                                           uuid, uuid)
+
+        CopyError = vexc.FileFaultException
+
+        def fake_wait_for_task(task_ref):
+            if task_ref == self.task_ref:
+                self.task_ref = None
+                # The partial file exists at the moment the copy fails.
+                self.assertTrue(vmwareapi_fake.get_file(cached_image))
+                # N.B. We don't test for -flat here because real
+                # CopyVirtualDisk_Task doesn't actually create it
+                raise CopyError('Copy failed!')
+            return self.wait_task(task_ref)
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "CopyVirtualDisk_Task":
+                # Remember the copy task so fake_wait_for_task can fail it.
+                self.task_ref = task_ref
+            return task_ref
+
+        with contextlib.nested(
+                mock.patch.object(self.conn._session, '_call_method',
+                                  new=fake_call_method),
+                mock.patch.object(self.conn._session, '_wait_for_task',
+                                  new=fake_wait_for_task)):
+            self.assertRaises(CopyError, self._create_vm)
+        # The partially-copied file must have been cleaned up.
+        self.assertFalse(vmwareapi_fake.get_file(cached_image))
+
+    def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
+        # Spawn instance
+        # Copy for extend fails, leaves file behind
+        # File cleanup fails
+        #
+        # Expect file to be left behind
+        # Expect file cleanup error to be raised
+        self.flags(use_linked_clone=True, group='vmware')
+        self.task_ref = None
+        uuid = self.fake_image_uuid
+        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
+                                                           uuid, uuid)
+
+        CopyError = vexc.FileFaultException
+        DeleteError = vexc.CannotDeleteFileException
+
+        def fake_wait_for_task(task_ref):
+            if task_ref == self.task_ref:
+                self.task_ref = None
+                self.assertTrue(vmwareapi_fake.get_file(cached_image))
+                # N.B. We don't test for -flat here because real
+                # CopyVirtualDisk_Task doesn't actually create it
+                raise CopyError('Copy failed!')
+            elif task_ref == 'fake-delete-task':
+                # The cleanup of the partial file also fails.
+                raise DeleteError('Delete failed!')
+            return self.wait_task(task_ref)
+
+        def fake_call_method(module, method, *args, **kwargs):
+            if method == "DeleteDatastoreFile_Task":
+                return 'fake-delete-task'
+
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "CopyVirtualDisk_Task":
+                self.task_ref = task_ref
+            return task_ref
+
+        with contextlib.nested(
+                mock.patch.object(self.conn._session, '_wait_for_task',
+                                  new=fake_wait_for_task),
+                mock.patch.object(self.conn._session, '_call_method',
+                                  new=fake_call_method)):
+            # The delete error (not the copy error) propagates to the caller.
+            self.assertRaises(DeleteError, self._create_vm)
+        # Cleanup failed, so the partial file remains on the datastore.
+        self.assertTrue(vmwareapi_fake.get_file(cached_image))
+
+    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+                       'from_image')
+    def test_spawn_disk_invalid_disk_size(self, mock_from_image):
+        """An image larger than the flavor root disk rejects the spawn."""
+        img_props = images.VMwareImage(
+            image_id=self.fake_image_uuid,
+            file_size=82 * units.Gi,
+            disk_type=constants.DISK_TYPE_SPARSE,
+            linked_clone=True)
+
+        mock_from_image.return_value = img_props
+
+        self.assertRaises(exception.InstanceUnacceptable,
+                          self._create_vm)
+
+    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+                       'from_image')
+    def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
+        """NoDiskSpace during extend propagates and leaves no cache files."""
+        img_props = images.VMwareImage(
+            image_id=self.fake_image_uuid,
+            file_size=1024,
+            disk_type=constants.DISK_TYPE_SPARSE,
+            linked_clone=True)
+
+        mock_from_image.return_value = img_props
+
+        cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+                                             self.fake_image_uuid,
+                                             '%s.80.vmdk' %
+                                              self.fake_image_uuid)
+        tmp_file = ds_util.DatastorePath(self.ds, 'vmware_base',
+                                         self.fake_image_uuid,
+                                         '%s.80-flat.vmdk' %
+                                          self.fake_image_uuid)
+
+        NoDiskSpace = vexc.get_fault_class('NoDiskSpace')
+
+        def fake_wait_for_task(task_ref):
+            if task_ref == self.task_ref:
+                self.task_ref = None
+                raise NoDiskSpace()
+            return self.wait_task(task_ref)
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == 'ExtendVirtualDisk_Task':
+                # Remember the extend task so fake_wait_for_task can fail it.
+                self.task_ref = task_ref
+            return task_ref
+
+        with contextlib.nested(
+            mock.patch.object(self.conn._session, '_wait_for_task',
+                              fake_wait_for_task),
+            mock.patch.object(self.conn._session, '_call_method',
+                              fake_call_method)
+        ) as (mock_wait_for_task, mock_call_method):
+            self.assertRaises(NoDiskSpace, self._create_vm)
+            # Both the cache entry and the temporary flat file are removed.
+            self.assertFalse(vmwareapi_fake.get_file(str(cached_image)))
+            self.assertFalse(vmwareapi_fake.get_file(str(tmp_file)))
+
+    def test_spawn_with_move_file_exists_exception(self):
+        # The test will validate that the spawn completes
+        # successfully. The "MoveDatastoreFile_Task" will
+        # raise an file exists exception. The flag
+        # self.exception will be checked to see that
+        # the exception has indeed been raised.
+
+        def fake_wait_for_task(task_ref):
+            if task_ref == self.task_ref:
+                self.task_ref = None
+                self.exception = True
+                # FileAlreadyExists means another process cached the image
+                # first; spawn should treat this as success.
+                raise vexc.FileAlreadyExistsException()
+            return self.wait_task(task_ref)
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "MoveDatastoreFile_Task":
+                self.task_ref = task_ref
+            return task_ref
+
+        with contextlib.nested(
+            mock.patch.object(self.conn._session, '_wait_for_task',
+                              fake_wait_for_task),
+            mock.patch.object(self.conn._session, '_call_method',
+                              fake_call_method)
+        ) as (_wait_for_task, _call_method):
+            self._create_vm()
+            info = self.conn.get_info({'uuid': self.uuid,
+                                       'node': self.instance_node})
+            self._check_vm_info(info, power_state.RUNNING)
+            self.assertTrue(self.exception)
+
+    def test_spawn_with_move_general_exception(self):
+        # The test will validate that the spawn completes
+        # successfully. The "MoveDatastoreFile_Task" will
+        # raise a general exception. The flag self.exception
+        # will be checked to see that the exception has
+        # indeed been raised.
+
+        def fake_wait_for_task(task_ref):
+            if task_ref == self.task_ref:
+                self.task_ref = None
+                self.exception = True
+                # A generic driver error on move is fatal for spawn.
+                raise vexc.VMwareDriverException('Exception!')
+            return self.wait_task(task_ref)
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "MoveDatastoreFile_Task":
+                self.task_ref = task_ref
+            return task_ref
+
+        with contextlib.nested(
+            mock.patch.object(self.conn._session, '_wait_for_task',
+                              fake_wait_for_task),
+            mock.patch.object(self.conn._session, '_call_method',
+                              fake_call_method)
+        ) as (_wait_for_task, _call_method):
+            self.assertRaises(vexc.VMwareDriverException,
+                              self._create_vm)
+            self.assertTrue(self.exception)
+
+    def test_spawn_with_move_poll_exception(self):
+        """A move task that polls to 'error' fails the spawn."""
+        self.call_method = self.conn._session._call_method
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "MoveDatastoreFile_Task":
+                # Return a task already in the error state, no fault attached.
+                task_mdo = vmwareapi_fake.create_task(method, "error")
+                return task_mdo.obj
+            return task_ref
+
+        with (
+            mock.patch.object(self.conn._session, '_call_method',
+                              fake_call_method)
+        ):
+            self.assertRaises(vexc.VMwareDriverException,
+                              self._create_vm)
+
+    def test_spawn_with_move_file_exists_poll_exception(self):
+        # The test will validate that the spawn completes
+        # successfully. The "MoveDatastoreFile_Task" will
+        # raise a file exists exception. The flag self.exception
+        # will be checked to see that the exception has
+        # indeed been raised.
+
+        def fake_call_method(module, method, *args, **kwargs):
+            task_ref = self.call_method(module, method, *args, **kwargs)
+            if method == "MoveDatastoreFile_Task":
+                self.exception = True
+                # Error task carrying a FileAlreadyExists fault: benign.
+                task_mdo = vmwareapi_fake.create_task(method, "error",
+                        error_fault=vmwareapi_fake.FileAlreadyExists())
+                return task_mdo.obj
+            return task_ref
+
+        with (
+            mock.patch.object(self.conn._session, '_call_method',
+                              fake_call_method)
+        ):
+            self._create_vm()
+            info = self.conn.get_info({'uuid': self.uuid,
+                                       'node': self.instance_node})
+            self._check_vm_info(info, power_state.RUNNING)
+            self.assertTrue(self.exception)
+
+    def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False):
+        """Spawn with a vmdk root volume and expect attach_volume to be called.
+
+        When vc_support is True, the volume is also expected to be
+        relocated into the VM's resource pool before attaching.
+        """
+        self._create_instance(set_image_ref=set_image_ref)
+        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
+        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
+        connection_info = self._test_vmdk_connection_info('vmdk')
+        root_disk = [{'connection_info': connection_info}]
+        v_driver.block_device_info_get_mapping(
+                mox.IgnoreArg()).AndReturn(root_disk)
+        if vc_support:
+            self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+                                     '_get_res_pool_of_vm')
+            volumeops.VMwareVolumeOps._get_res_pool_of_vm(
+                     mox.IgnoreArg()).AndReturn('fake_res_pool')
+            self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+                                     '_relocate_vmdk_volume')
+            volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
+                     'fake_res_pool', mox.IgnoreArg())
+        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+                                 'attach_volume')
+        volumeops.VMwareVolumeOps.attach_volume(connection_info,
+                self.instance, mox.IgnoreArg())
+        self.mox.ReplayAll()
+        block_device_info = {'mount_device': 'vda'}
+        self.conn.spawn(self.context, self.instance, self.image,
+                        injected_files=[], admin_password=None,
+                        network_info=self.network_info,
+                        block_device_info=block_device_info)
+
+    def test_spawn_attach_volume_iscsi(self):
+        """Spawn with an iSCSI root volume attaches it via volumeops."""
+        self._create_instance()
+        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
+        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
+        connection_info = self._test_vmdk_connection_info('iscsi')
+        root_disk = [{'connection_info': connection_info}]
+        v_driver.block_device_info_get_mapping(
+                mox.IgnoreArg()).AndReturn(root_disk)
+        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+                                 'attach_volume')
+        volumeops.VMwareVolumeOps.attach_volume(connection_info,
+                self.instance, mox.IgnoreArg())
+        self.mox.ReplayAll()
+        block_device_info = {'mount_device': 'vda'}
+        self.conn.spawn(self.context, self.instance, self.image,
+                        injected_files=[], admin_password=None,
+                        network_info=self.network_info,
+                        block_device_info=block_device_info)
+
+    def mock_upload_image(self, context, image, instance, **kwargs):
+        """Stand-in for images.upload_image used by the snapshot tests.
+
+        Asserts the expected snapshot name, instance, and disk type.
+        """
+        self.assertEqual(image, 'Test-Snapshot')
+        self.assertEqual(instance, self.instance)
+        self.assertEqual(kwargs['disk_type'], 'preallocated')
+
+    def test_get_vm_ref_using_extra_config(self):
+        """Lookup by extraConfig uuid works, and falls back to name lookup."""
+        self._create_vm()
+        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
+                                                     self.instance['uuid'])
+        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+        # Disrupt the fake Virtual Machine object so that extraConfig
+        # cannot be matched.
+        fake_vm = self._get_vm_record()
+        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
+        # We should not get a Virtual Machine through extraConfig.
+        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
+                                                     self.instance['uuid'])
+        self.assertIsNone(vm_ref, 'VM Reference should be none')
+        # Check if we can find the Virtual Machine using the name.
+        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+
+    def test_search_vm_ref_by_identifier(self):
+        """search_vm_ref_by_identifier matches uuid, instanceUuid, or name."""
+        self._create_vm()
+        vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
+                                            self.instance['uuid'])
+        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+        fake_vm = self._get_vm_record()
+        # Rewrite every identifier on the fake VM to "foo" so the original
+        # uuid no longer matches but "foo" does.
+        fake_vm.set("summary.config.instanceUuid", "foo")
+        fake_vm.set("name", "foo")
+        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
+        self.assertIsNone(vm_util.search_vm_ref_by_identifier(
+                                    self.conn._session, self.instance['uuid']),
+                          "VM Reference should be none")
+        self.assertIsNotNone(
+                vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
+                "VM Reference should not be none")
+
+    def test_get_object_for_optionvalue(self):
+        """_get_object_for_optionvalue finds the VM by its extraConfig uuid."""
+        self._create_vm()
+        vms = self.conn._session._call_method(vim_util, "get_objects",
+                "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
+        vm_ref = vm_util._get_object_for_optionvalue(vms,
+                                                     self.instance["uuid"])
+        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+
+    def _test_snapshot(self):
+        """Snapshot the current VM and verify task-state transitions.
+
+        Expects update_task_state to be called with IMAGE_PENDING_UPLOAD
+        then IMAGE_UPLOADING, and the VM to stay RUNNING throughout.
+        """
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        with mock.patch.object(images, 'upload_image',
+                               self.mock_upload_image):
+            self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
+                               func_call_matcher.call)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        # None means every expected update_task_state call was matched.
+        self.assertIsNone(func_call_matcher.match())
+
+    def test_snapshot(self):
+        """Snapshot of a running VM succeeds."""
+        self._create_vm()
+        self._test_snapshot()
+
+    def test_snapshot_no_root_disk(self):
+        """Snapshot fails with NoRootDiskDefined when the VM has no root disk."""
+        self._iso_disk_type_created(instance_type='m1.micro')
+        self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
+                          self.context, self.instance, "Test-Snapshot",
+                          lambda *args, **kwargs: None)
+
+    def test_snapshot_non_existent(self):
+        """Snapshot of an instance with no backing VM raises InstanceNotFound."""
+        self._create_instance()
+        self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
+                          self.context, self.instance, "Test-Snapshot",
+                          lambda *args, **kwargs: None)
+
+    def test_snapshot_delete_vm_snapshot(self):
+        """Snapshot creates and then deletes the intermediate VM snapshot."""
+        self._create_vm()
+        fake_vm = self._get_vm_record()
+        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+                               value="Snapshot-123",
+                               name="VirtualMachineSnapshot")
+
+        self.mox.StubOutWithMock(vmops.VMwareVMOps,
+                                 '_create_vm_snapshot')
+        self.conn._vmops._create_vm_snapshot(
+                self.instance, fake_vm.obj).AndReturn(snapshot_ref)
+
+        self.mox.StubOutWithMock(vmops.VMwareVMOps,
+                                 '_delete_vm_snapshot')
+        self.conn._vmops._delete_vm_snapshot(
+                self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
+        self.mox.ReplayAll()
+
+        self._test_snapshot()
+
+    def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
+        """Exercise _delete_vm_snapshot when the wait raises *exception*.
+
+        TaskInProgress is retried (call_count attempts, sleeping between
+        them); any other exception propagates without retry.
+        """
+        self._create_vm()
+        fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
+        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+                               value="Snapshot-123",
+                               name="VirtualMachineSnapshot")
+
+        with contextlib.nested(
+            mock.patch.object(self.conn._session, '_wait_for_task',
+                              side_effect=exception),
+            mock.patch.object(vmops, '_time_sleep_wrapper')
+        ) as (_fake_wait, _fake_sleep):
+            if exception != error_util.TaskInProgress:
+                self.assertRaises(exception,
+                                  self.conn._vmops._delete_vm_snapshot,
+                                  self.instance, fake_vm, snapshot_ref)
+                self.assertEqual(0, _fake_sleep.call_count)
+            else:
+                self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
+                                                     snapshot_ref)
+                # One sleep between each retry, none after the last attempt.
+                self.assertEqual(call_count - 1, _fake_sleep.call_count)
+            self.assertEqual(call_count, _fake_wait.call_count)
+
+    def test_snapshot_delete_vm_snapshot_exception(self):
+        """A non-retryable exception during snapshot delete propagates."""
+        self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
+
+    def test_snapshot_delete_vm_snapshot_exception_retry(self):
+        """TaskInProgress is retried up to the configured api_retry_count."""
+        self.flags(api_retry_count=5, group='vmware')
+        self._snapshot_delete_vm_snapshot_exception(error_util.TaskInProgress,
+                                                    5)
+
+    def test_reboot(self):
+        """Soft reboot of a running VM leaves it RUNNING."""
+        self._create_vm()
+        info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        reboot_type = "SOFT"
+        self.conn.reboot(self.context, self.instance, self.network_info,
+                         reboot_type)
+        info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_reboot_with_uuid(self):
+        """Test fall back to use name when can't find by uuid."""
+        self._create_vm()
+        info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        reboot_type = "SOFT"
+        self.conn.reboot(self.context, self.instance, self.network_info,
+                         reboot_type)
+        info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_reboot_non_existent(self):
+        """Reboot of an instance with no backing VM raises InstanceNotFound."""
+        self._create_instance()
+        self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
+                          self.context, self.instance, self.network_info,
+                          'SOFT')
+
+    def test_poll_rebooting_instances(self):
+        """poll_rebooting_instances issues a compute API reboot per instance."""
+        self.mox.StubOutWithMock(compute_api.API, 'reboot')
+        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
+                               mox.IgnoreArg())
+        self.mox.ReplayAll()
+        self._create_vm()
+        instances = [self.instance]
+        self.conn.poll_rebooting_instances(60, instances)
+
+    def test_reboot_not_poweredon(self):
+        """Rebooting a suspended VM raises InstanceRebootFailure."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.conn.suspend(self.instance)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.SUSPENDED)
+        self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
+                          self.context, self.instance, self.network_info,
+                          'SOFT')
+
+    def test_suspend(self):
+        """Suspending a running VM transitions it to SUSPENDED."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.conn.suspend(self.instance)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.SUSPENDED)
+
+    def test_suspend_non_existent(self):
+        """Suspend of an instance with no backing VM raises InstanceNotFound."""
+        self._create_instance()
+        self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
+                          self.instance)
+
+    def test_resume(self):
+        """Resuming a suspended VM returns it to RUNNING."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.conn.suspend(self.instance)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.SUSPENDED)
+        self.conn.resume(self.context, self.instance, self.network_info)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_resume_non_existent(self):
+        """Resume of an instance with no backing VM raises InstanceNotFound."""
+        self._create_instance()
+        self.assertRaises(exception.InstanceNotFound, self.conn.resume,
+                          self.context, self.instance, self.network_info)
+
+    def test_resume_not_suspended(self):
+        """Resume of a running (not suspended) VM raises InstanceResumeFailure."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
+                          self.context, self.instance, self.network_info)
+
+    def test_power_on(self):
+        """power_on brings a powered-off VM back to RUNNING."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.conn.power_off(self.instance)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.SHUTDOWN)
+        self.conn.power_on(self.context, self.instance, self.network_info)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+
+    def test_power_on_non_existent(self):
+        """power_on of an instance with no backing VM raises InstanceNotFound."""
+        self._create_instance()
+        self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
+                          self.context, self.instance, self.network_info)
+
+    def test_power_off(self):
+        """power_off transitions a running VM to SHUTDOWN."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.conn.power_off(self.instance)
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.SHUTDOWN)
+
+    def test_power_off_non_existent(self):
+        """power_off of an instance with no backing VM raises InstanceNotFound."""
+        self._create_instance()
+        self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
+                          self.instance)
+
+    def test_resume_state_on_host_boot(self):
+        """A poweredOff VM gets a hard reboot on host boot."""
+        self._create_vm()
+        self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+        self.mox.StubOutWithMock(self.conn, "reboot")
+        vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+            self.instance['uuid']).AndReturn("poweredOff")
+        self.conn.reboot(self.context, self.instance, 'network_info',
+            'hard', None)
+        self.mox.ReplayAll()
+        self.conn.resume_state_on_host_boot(self.context, self.instance,
+            'network_info')
+
+    def test_resume_state_on_host_boot_no_reboot_1(self):
+        """Don't call reboot on instance which is poweredon."""
+        self._create_vm()
+        self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+        self.mox.StubOutWithMock(self.conn, 'reboot')
+        vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+            self.instance['uuid']).AndReturn("poweredOn")
+        self.mox.ReplayAll()
+        self.conn.resume_state_on_host_boot(self.context, self.instance,
+            'network_info')
+
+    def test_resume_state_on_host_boot_no_reboot_2(self):
+        """Don't call reboot on instance which is suspended."""
+        self._create_vm()
+        self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+        self.mox.StubOutWithMock(self.conn, 'reboot')
+        vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+            self.instance['uuid']).AndReturn("suspended")
+        self.mox.ReplayAll()
+        self.conn.resume_state_on_host_boot(self.context, self.instance,
+            'network_info')
+
+    def destroy_rescued(self, fake_method):
+        """Destroy a RESCUED instance, with *fake_method* as the disk detach.
+
+        Both the instance vmdk and the rescue vmdk must be gone afterwards,
+        and the VM must not be powered on as part of the unrescue.
+        """
+        self._rescue()
+        with contextlib.nested(
+            mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+                              fake_method),
+            mock.patch.object(vm_util, "power_on_instance"),
+        ) as (fake_detach, fake_power_on):
+            self.instance['vm_state'] = vm_states.RESCUED
+            self.conn.destroy(self.context, self.instance, self.network_info)
+            inst_path = ds_util.DatastorePath(self.ds, self.uuid,
+                                              '%s.vmdk' % self.uuid)
+            self.assertFalse(vmwareapi_fake.get_file(str(inst_path)))
+            rescue_file_path = ds_util.DatastorePath(
+                self.ds, '%s-rescue' % self.uuid, '%s-rescue.vmdk' % self.uuid)
+            self.assertFalse(vmwareapi_fake.get_file(str(rescue_file_path)))
+            # Unrescue does not power on with destroy
+            self.assertFalse(fake_power_on.called)
+
+    def test_destroy_rescued(self):
+        """Destroying a rescued instance cleans up both VMs' disks."""
+        def fake_detach_disk_from_vm(*args, **kwargs):
+            pass
+        self.destroy_rescued(fake_detach_disk_from_vm)
+
+    def test_destroy_rescued_with_exception(self):
+        """Destroy still completes when the disk detach raises."""
+        def fake_detach_disk_from_vm(*args, **kwargs):
+            raise exception.NovaException('Here is my fake exception')
+        self.destroy_rescued(fake_detach_disk_from_vm)
+
+    def test_destroy(self):
+        """Destroy removes the VM and evicts its vm_ref cache entry."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        instances = self.conn.list_instances()
+        self.assertEqual(len(instances), 1)
+        self.conn.destroy(self.context, self.instance, self.network_info)
+        instances = self.conn.list_instances()
+        self.assertEqual(len(instances), 0)
+        self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+
+    def test_destroy_no_datastore(self):
+        """Destroy succeeds even when the VM has no vmPathName set."""
+        self._create_vm()
+        info = self.conn.get_info({'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        instances = self.conn.list_instances()
+        self.assertEqual(len(instances), 1)
+        # Delete the vmPathName
+        vm = self._get_vm_record()
+        vm.delete('config.files.vmPathName')
+        self.conn.destroy(self.context, self.instance, self.network_info)
+        instances = self.conn.list_instances()
+        self.assertEqual(len(instances), 0)
+
+    def test_destroy_non_existent(self):
+        """Driver destroy delegates to vmops.destroy with destroy_disks."""
+        self.destroy_disks = True
+        with mock.patch.object(self.conn._vmops,
+                               "destroy") as mock_destroy:
+            self._create_instance()
+            self.conn.destroy(self.context, self.instance,
+                              self.network_info,
+                              None, self.destroy_disks)
+            mock_destroy.assert_called_once_with(self.instance,
+                                                 self.destroy_disks)
+
+    def test_destroy_instance_without_compute(self):
+        """Destroy is a no-op for an instance not on a compute node."""
+        self.destroy_disks = True
+        with mock.patch.object(self.conn._vmops,
+                               "destroy") as mock_destroy:
+            self.conn.destroy(self.context, self.instance_without_compute,
+                              self.network_info,
+                              None, self.destroy_disks)
+            self.assertFalse(mock_destroy.called)
+
+    def _destroy_instance_without_vm_ref(self, resize_exists=False,
+                                         task_state=None):
+        """Destroy when no vm_ref exists for the instance.
+
+        When a resize VM (found by name) exists, it is also destroyed —
+        except during RESIZE_REVERTING, where only one destroy happens.
+        No session calls should be made in any case.
+        """
+
+        def fake_vm_ref_from_name(session, vm_name):
+            # Returns a ref only when simulating a leftover resize VM.
+            if resize_exists:
+                return 'fake-ref'
+
+        self._create_instance()
+        with contextlib.nested(
+             mock.patch.object(vm_util, 'get_vm_ref_from_name',
+                               fake_vm_ref_from_name),
+             mock.patch.object(self.conn._session,
+                               '_call_method'),
+             mock.patch.object(self.conn._vmops,
+                               '_destroy_instance')
+        ) as (mock_get, mock_call, mock_destroy):
+            self.instance.task_state = task_state
+            self.conn.destroy(self.context, self.instance,
+                              self.network_info,
+                              None, True)
+            if resize_exists:
+                if task_state == task_states.RESIZE_REVERTING:
+                    expected = 1
+                else:
+                    expected = 2
+            else:
+                expected = 1
+            self.assertEqual(expected, mock_destroy.call_count)
+            self.assertFalse(mock_call.called)
+
+    def test_destroy_instance_without_vm_ref(self):
+        """No vm_ref and no resize VM: a single destroy call."""
+        self._destroy_instance_without_vm_ref()
+
+    def test_destroy_instance_without_vm_ref_with_resize(self):
+        """A leftover resize VM triggers a second destroy call."""
+        self._destroy_instance_without_vm_ref(resize_exists=True)
+
+    def test_destroy_instance_without_vm_ref_with_resize_revert(self):
+        """During resize revert the resize VM is not destroyed twice."""
+        self._destroy_instance_without_vm_ref(resize_exists=True,
+            task_state=task_states.RESIZE_REVERTING)
+
+    def _rescue(self, config_drive=False):
+        # validate that the power on is only called once
+        self._power_on = vm_util.power_on_instance
+        self._power_on_called = 0
+
+        def fake_attach_disk_to_vm(vm_ref, instance,
+                                   adapter_type, disk_type, vmdk_path=None,
+                                   disk_size=None, linked_clone=False,
+                                   controller_key=None, unit_number=None,
+                                   device_name=None):
+            # The original VM must be powered off before its disk is
+            # attached to the rescue VM.
+            info = self.conn.get_info(instance)
+            self._check_vm_info(info, power_state.SHUTDOWN)
+
+        if config_drive:
+            def fake_create_config_drive(instance, injected_files, password,
+                                         data_store_name, folder,
+                                         instance_uuid, cookies):
+                self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
+                return str(ds_util.DatastorePath(data_store_name,
+                                                 instance_uuid, 'fake.iso'))
+
+            self.stubs.Set(self.conn._vmops, '_create_config_drive',
+                           fake_create_config_drive)
+
+        self._create_vm()
+
+        def fake_power_on_instance(session, instance, vm_ref=None):
+            # Count power-ons; the real helper still runs.
+            self._power_on_called += 1
+            return self._power_on(session, instance, vm_ref=vm_ref)
+
+        info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        self.stubs.Set(vm_util, "power_on_instance",
+                       fake_power_on_instance)
+        self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
+                       fake_attach_disk_to_vm)
+
+        self.conn.rescue(self.context, self.instance, self.network_info,
+                         self.image, 'fake-password')
+
+        # The rescue VM runs; the original VM stays shut down.
+        info = self.conn.get_info({'name': '1-rescue',
+                                   'uuid': '%s-rescue' % self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.RUNNING)
+        info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+                                   'node': self.instance_node})
+        self._check_vm_info(info, power_state.SHUTDOWN)
+        self.assertIsNotNone(vm_util.vm_ref_cache_get('%s-rescue' % self.uuid))
+        self.assertEqual(1, self._power_on_called)
+
    def test_rescue(self):
        self._rescue()
        # Both the instance's vmdk and the rescue copy must exist on the
        # fake datastore after the rescue.
        inst_file_path = ds_util.DatastorePath(self.ds, self.uuid,
                                               '%s.vmdk' % self.uuid)
        self.assertTrue(vmwareapi_fake.get_file(str(inst_file_path)))
        rescue_file_path = ds_util.DatastorePath(self.ds,
                                                 '%s-rescue' % self.uuid,
                                                 '%s-rescue.vmdk' % self.uuid)
        self.assertTrue(vmwareapi_fake.get_file(str(rescue_file_path)))

    def test_rescue_with_config_drive(self):
        # force_config_drive makes the rescue path build a config drive;
        # the stubbed _create_config_drive in _rescue validates its args.
        self.flags(force_config_drive=True)
        self._rescue(config_drive=True)
+
    def test_unrescue(self):
        # NOTE(dims): driver unrescue ends up eventually in vmops.unrescue
        # with power_on=True, the test_destroy_rescued tests the
        # vmops.unrescue with power_on=False
        self._rescue()
        vm_ref = vm_util.get_vm_ref(self.conn._session,
                                    self.instance)
        vm_rescue_ref = vm_util.get_vm_ref_from_name(self.conn._session,
                                                     '%s-rescue' % self.uuid)

        self.poweroff_instance = vm_util.power_off_instance

        def fake_power_off_instance(session, instance, vm_ref):
            # This is called so that we actually poweroff the simulated vm.
            # The reason for this is that there is a validation in destroy
            # that the instance is not powered on.
            self.poweroff_instance(session, instance, vm_ref)

        def fake_detach_disk_from_vm(vm_ref, instance,
                                     device_name, destroy_disk=False):
            # Record the device name for the assertion below and verify the
            # rescue VM has been powered off before its disk is detached.
            self.test_device_name = device_name
            info = self.conn.get_info(instance)
            self._check_vm_info(info, power_state.SHUTDOWN)

        with contextlib.nested(
            mock.patch.object(vm_util, "power_off_instance",
                              side_effect=fake_power_off_instance),
            mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
                              side_effect=fake_detach_disk_from_vm),
            mock.patch.object(vm_util, "power_on_instance"),
        ) as (poweroff, detach, fake_power_on):
            self.conn.unrescue(self.instance, None)
            # The rescue VM is powered off and its disk detached; then the
            # original VM is powered back on.
            poweroff.assert_called_once_with(self.conn._session, mock.ANY,
                                             vm_rescue_ref)
            detach.assert_called_once_with(vm_rescue_ref, mock.ANY,
                                           self.test_device_name)
            fake_power_on.assert_called_once_with(self.conn._session,
                                                  self.instance,
                                                  vm_ref=vm_ref)
            self.test_vm_ref = None
            self.test_device_name = None
+
    def test_get_diagnostics(self):
        self._create_vm()
        # Values mirror the fake vSphere backend's VM summary/quickStats.
        expected = {'memoryReservation': 0, 'suspendInterval': 0,
                    'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
                    'consumedOverheadMemory': 20, 'numEthernetCards': 1,
                    'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
                    'memoryOverhead': 21417984,
                    'guestMemoryUsage': 0, 'connectionState': 'connected',
                    'memorySizeMB': 512, 'balloonedMemory': 0,
                    'vmPathName': 'fake_path', 'template': False,
                    'overallCpuUsage': 0, 'powerState': 'poweredOn',
                    'cpuReservation': 0, 'overallCpuDemand': 0,
                    'numVirtualDisks': 1, 'hostMemoryUsage': 141}
        # The driver namespaces every key with a 'vmware:' prefix.
        expected = dict([('vmware:' + k, v) for k, v in expected.items()])
        self.assertThat(
            self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid,
                                       'node': self.instance_node}),
            matchers.DictMatches(expected))

    def test_get_instance_diagnostics(self):
        self._create_vm()
        # Serialized form of the standardized Diagnostics object.
        expected = {'uptime': 0,
                    'memory_details': {'used': 0, 'maximum': 512},
                    'nic_details': [],
                    'driver': 'vmwareapi',
                    'state': 'running',
                    'version': '1.0',
                    'cpu_details': [],
                    'disk_details': [],
                    'hypervisor_os': 'esxi',
                    'config_drive': False}
        actual = self.conn.get_instance_diagnostics(
            {'name': 1, 'uuid': self.uuid, 'node': self.instance_node})
        self.assertThat(actual.serialize(), matchers.DictMatches(expected))

    def test_get_console_output(self):
        # Console output is not supported by this driver.
        self.assertRaises(NotImplementedError, self.conn.get_console_output,
                          None, None)
+
    def _test_finish_migration(self, power_on, resize_instance=False):
        """Drive finish_migration with mostly-None arguments; subclasses'
        tests only vary power_on/resize_instance.
        """
        self._create_vm()
        self.conn.finish_migration(context=self.context,
                                   migration=None,
                                   instance=self.instance,
                                   disk_info=None,
                                   network_info=None,
                                   block_device_info=None,
                                   resize_instance=resize_instance,
                                   image_meta=None,
                                   power_on=power_on)

    def _test_finish_revert_migration(self, power_on):
        # NOTE(review): power_on is accepted for signature parity with the
        # sibling helper but is unused here — the ESX driver is expected
        # to raise before it matters.
        self._create_vm()
        # Ensure ESX driver throws an error
        self.assertRaises(NotImplementedError,
                          self.conn.finish_revert_migration,
                          self.context,
                          instance=self.instance,
                          network_info=None)

    def test_get_vnc_console_non_existent(self):
        # Instance exists in the DB but no backing VM was created.
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)
+
    def _test_get_vnc_console(self):
        # Inject the VNC port extraConfig option into the fake VM record,
        # then check the driver reads it back.
        self._create_vm()
        fake_vm = self._get_vm_record()
        OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
        opt_val = OptionValue(key='', value=5906)
        fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
        vnc_console = self.conn.get_vnc_console(self.context, self.instance)
        self.assertEqual(self.vnc_host, vnc_console.host)
        self.assertEqual(5906, vnc_console.port)

    def test_get_vnc_console(self):
        self._test_get_vnc_console()

    def test_get_vnc_console_noport(self):
        # Without the VNC extraConfig option the console is unavailable.
        self._create_vm()
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)

    def test_get_volume_connector(self):
        self._create_vm()
        connector_dict = self.conn.get_volume_connector(self.instance)
        fake_vm = self._get_vm_record()
        # 'instance' in the connector is the managed object id of the VM.
        fake_vm_id = fake_vm.obj.value
        self.assertEqual(connector_dict['ip'], 'test_url')
        self.assertEqual(connector_dict['initiator'], 'iscsi-name')
        self.assertEqual(connector_dict['host'], 'test_url')
        self.assertEqual(connector_dict['instance'], fake_vm_id)
+
+ def _test_vmdk_connection_info(self, type):
+ return {'driver_volume_type': type,
+ 'serial': 'volume-fake-id',
+ 'data': {'volume': 'vm-10',
+ 'volume_id': 'volume-fake-id'}}
+
    def test_volume_attach_vmdk(self):
        # attach_volume with a 'vmdk' connection must delegate to
        # _attach_volume_vmdk (mox verifies the expected call).
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_vmdk')
        volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
                self.instance, mount_point)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)

    def test_volume_detach_vmdk(self):
        # detach_volume with a 'vmdk' connection must delegate to
        # _detach_volume_vmdk.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_vmdk')
        volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
                self.instance, mount_point)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)

    def test_attach_vmdk_disk_to_vm(self):
        # Deeper check: the vmdk path from the volume's base device must be
        # passed through to attach_disk_to_vm.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'

        # create fake backing info
        volume_device = vmwareapi_fake.DataObject()
        volume_device.backing = vmwareapi_fake.DataObject()
        volume_device.backing.fileName = 'fake_path'

        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_get_vmdk_base_volume_device')
        volumeops.VMwareVolumeOps._get_vmdk_base_volume_device(
                mox.IgnoreArg()).AndReturn(volume_device)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_disk_to_vm')
        volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
                self.instance, mox.IgnoreArg(), mox.IgnoreArg(),
                vmdk_path='fake_path')
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)

    def test_detach_vmdk_disk_from_vm(self):
        # Detach path: resolve volume uuid -> backed disk device, then
        # consolidate the volume before detaching the disk.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_get_volume_uuid')
        volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(),
                'volume-fake-id').AndReturn('fake_disk_uuid')
        self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device')
        vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(),
                'fake_disk_uuid').AndReturn('fake_device')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_consolidate_vmdk_volume')
        volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance,
                 mox.IgnoreArg(), 'fake_device', mox.IgnoreArg())
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'detach_disk_from_vm')
        volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
                self.instance, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
+
    def test_volume_attach_iscsi(self):
        # attach_volume with an 'iscsi' connection must delegate to
        # _attach_volume_iscsi.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_iscsi')
        volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
                self.instance, mount_point)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)

    def test_volume_detach_iscsi(self):
        # detach_volume with an 'iscsi' connection must delegate to
        # _detach_volume_iscsi.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_iscsi')
        volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
                self.instance, mount_point)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)

    def test_attach_iscsi_disk_to_vm(self):
        # First lookup misses, HBA is rescanned with the target portal,
        # second lookup finds the target, then the disk is attached as
        # an 'rdmp' device.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_host:port'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        discover = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        # simulate target not found
        volumeops.VMwareVolumeOps._iscsi_get_target(
                connection_info['data']).AndReturn((None, None))
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_add_send_target_host')
        # rescan gets called with target portal
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_rescan_hba')
        volumeops.VMwareVolumeOps._iscsi_rescan_hba(
                connection_info['data']['target_portal'])
        # simulate target found
        volumeops.VMwareVolumeOps._iscsi_get_target(
                connection_info['data']).AndReturn(discover)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_disk_to_vm')
        volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
                self.instance, mox.IgnoreArg(), 'rdmp',
                device_name=mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)

    def test_iscsi_rescan_hba(self):
        fake_target_portal = 'fake_target_host:port'
        host_storage_sys = vmwareapi_fake._get_objects(
            "HostStorageSystem").objects[0]
        iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
                                               '.hostBusAdapter')
        iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
        # Check the host system does not have the send target
        self.assertRaises(AttributeError, getattr, iscsi_hba,
                          'configuredSendTarget')
        # Rescan HBA with the target portal
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        vops._iscsi_rescan_hba(fake_target_portal)
        # Check if HBA has the target portal configured
        self.assertEqual('fake_target_host',
                          iscsi_hba.configuredSendTarget[0].address)
        # Rescan HBA with same portal
        vops._iscsi_rescan_hba(fake_target_portal)
        # Rescanning an already-configured portal must not add a duplicate.
        self.assertEqual(1, len(iscsi_hba.configuredSendTarget))

    def test_iscsi_get_target(self):
        data = {'target_portal': 'fake_target_host:port',
                'target_iqn': 'fake_target_iqn'}
        host = vmwareapi_fake._get_objects('HostSystem').objects[0]
        # Register the target on the fake host so the lookup can find it.
        host._add_iscsi_target(data)
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        result = vops._iscsi_get_target(data)
        self.assertEqual(('fake-device', 'fake-uuid'), result)

    def test_detach_iscsi_disk_from_vm(self):
        # Detach path: find the target, resolve its RDM disk and detach it
        # with destroy_disk=True.
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_portal'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        find = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        volumeops.VMwareVolumeOps._iscsi_get_target(
                connection_info['data']).AndReturn(find)
        self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
        device = 'fake_device'
        vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'detach_disk_from_vm')
        volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
                self.instance, device, destroy_disk=True)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
+
    def test_connection_info_get(self):
        # With a live VM the connector includes the 'instance' moref.
        self._create_vm()
        connector = self.conn.get_volume_connector(self.instance)
        self.assertEqual(connector['ip'], 'test_url')
        self.assertEqual(connector['host'], 'test_url')
        self.assertEqual(connector['initiator'], 'iscsi-name')
        self.assertIn('instance', connector)

    def test_connection_info_get_after_destroy(self):
        # After destroy the VM no longer exists, so 'instance' is omitted
        # while the host-level connector fields are still returned.
        self._create_vm()
        self.conn.destroy(self.context, self.instance, self.network_info)
        connector = self.conn.get_volume_connector(self.instance)
        self.assertEqual(connector['ip'], 'test_url')
        self.assertEqual(connector['host'], 'test_url')
        self.assertEqual(connector['initiator'], 'iscsi-name')
        self.assertNotIn('instance', connector)

    def test_refresh_instance_security_rules(self):
        # Security group refresh is not supported by this driver.
        self.assertRaises(NotImplementedError,
                          self.conn.refresh_instance_security_rules,
                          instance=None)
+
    def test_image_aging_image_used(self):
        # An image referenced by a running instance is never aged out.
        self._create_vm()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()

    def _get_timestamp_filename(self):
        """Return the aging timestamp filename for the fixed 'old' time."""
        return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                         timeutils.strtime(at=self.old_time,
                                           fmt=imagecache.TIMESTAMP_FORMAT))

    def _override_time(self):
        """Pin the image-cache timestamp to a fixed datetime so aging
        behaviour is deterministic.
        """
        self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)

        def _fake_get_timestamp_filename(fake):
            return self._get_timestamp_filename()

        self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
                       _fake_get_timestamp_filename)

    def _timestamp_file_exists(self, exists=True):
        # Assert presence (or absence) of the aging marker directory in
        # the cached image's folder on the fake datastore.
        timestamp = ds_util.DatastorePath(self.ds, 'vmware_base',
                                          self.fake_image_uuid,
                                          self._get_timestamp_filename() + '/')
        if exists:
            self.assertTrue(vmwareapi_fake.get_file(str(timestamp)))
        else:
            self.assertFalse(vmwareapi_fake.get_file(str(timestamp)))

    def _image_aging_image_marked_for_deletion(self):
        # With no instances using the image, a cache pass marks it for
        # deletion (timestamp file created) without removing the files yet.
        self._create_vm(uuid=uuidutils.generate_uuid())
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()
        self._timestamp_file_exists()

    def test_image_aging_image_marked_for_deletion(self):
        self._override_time()
        self._image_aging_image_marked_for_deletion()

    def _timestamp_file_removed(self):
        # Spawning a new instance from the image unmarks it: the timestamp
        # file must be removed again.
        self._override_time()
        self._image_aging_image_marked_for_deletion()
        self._create_vm(num_instances=2,
                        uuid=uuidutils.generate_uuid())
        self._timestamp_file_exists(exists=False)

    def test_timestamp_file_removed_spawn(self):
        self._timestamp_file_removed()

    def test_timestamp_file_removed_aging(self):
        # A cache pass with the image back in use also clears a manually
        # re-added timestamp marker.
        self._timestamp_file_removed()
        ts = self._get_timestamp_filename()
        ts_path = ds_util.DatastorePath(self.ds, 'vmware_base',
                                        self.fake_image_uuid, ts + '/')
        vmwareapi_fake._add_file(str(ts_path))
        self._timestamp_file_exists()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._timestamp_file_exists(exists=False)

    def test_image_aging_disabled(self):
        # With remove_unused_base_images=False nothing is marked or removed.
        self._override_time()
        self.flags(remove_unused_base_images=False)
        self._create_vm()
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist(exists=True)
        self._timestamp_file_exists(exists=False)

    def _image_aging_aged(self, aging_time=100):
        # Mark the image, then advance the clock 10 seconds and run another
        # cache pass; whether files are deleted depends on aging_time.
        self._override_time()
        cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.flags(remove_unused_original_minimum_age_seconds=aging_time)
        self._image_aging_image_marked_for_deletion()
        all_instances = []
        timeutils.set_time_override(cur_time)
        self.conn.manage_image_cache(self.context, all_instances)

    def test_image_aging_aged(self):
        # 8s threshold < 10s elapsed: the cached files are removed.
        self._image_aging_aged(aging_time=8)
        self._cached_files_exist(exists=False)

    def test_image_aging_not_aged(self):
        # Default 100s threshold > 10s elapsed: files are kept.
        self._image_aging_aged()
        self._cached_files_exist()
+
+
+class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
+
    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register):
        """Configure a two-cluster VC driver against the fake vSphere API.

        Extension registration is mocked out so no API traffic happens
        during driver construction.
        """
        super(VMwareAPIVCDriverTestCase, self).setUp(create_connection=False)
        cluster_name = 'test_cluster'
        cluster_name2 = 'test_cluster2'
        self.flags(cluster_name=[cluster_name, cluster_name2],
                   api_retry_count=1,
                   task_poll_interval=10, datastore_regex='.*', group='vmware')
        self.flags(vnc_enabled=False,
                   image_cache_subdirectory_name='vmware_base')
        vmwareapi_fake.reset()
        self.conn = driver.VMwareVCDriver(None, False)
        self._set_exception_vars()
        # NOTE(review): dict.keys()[0] is Python-2-only; under Python 3
        # this would need list(...). Node order determines which fake
        # datastore each node maps to.
        self.node_name = self.conn._resources.keys()[0]
        self.node_name2 = self.conn._resources.keys()[1]
        if cluster_name2 in self.node_name2:
            self.ds = 'ds1'
        else:
            self.ds = 'ds2'
        self.vnc_host = 'ha-host'

    def tearDown(self):
        super(VMwareAPIVCDriverTestCase, self).tearDown()
        # Drop all state from the fake vSphere backend between tests.
        vmwareapi_fake.cleanup()
+
    def test_public_api_signatures(self):
        # The VC driver must not diverge from the ComputeDriver interface.
        self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)

    def test_register_extension(self):
        # find_extension returns None -> the extension gets registered.
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value=None) as mock_call_method:
            self.conn._register_openstack_extension()
            mock_call_method.assert_has_calls(
                [mock.call(oslo_vim_util, 'find_extension',
                           constants.EXTENSION_KEY),
                 mock.call(oslo_vim_util, 'register_extension',
                           constants.EXTENSION_KEY,
                           constants.EXTENSION_TYPE_INSTANCE)])

    def test_register_extension_already_exists(self):
        # find_extension returns an extension -> no registration call.
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value='fake-extension') as mock_find_ext:
            self.conn._register_openstack_extension()
            mock_find_ext.assert_called_once_with(oslo_vim_util,
                                                  'find_extension',
                                                  constants.EXTENSION_KEY)

    def test_list_instances(self):
        # No VMs created yet: the list is empty.
        instances = self.conn.list_instances()
        self.assertEqual(0, len(instances))
+
    def test_list_instances_from_nodes(self):
        # Create instance on node1
        self._create_vm(self.node_name)
        # Create instances on the other node
        self._create_vm(self.node_name2, num_instances=2)
        self._create_vm(self.node_name2, num_instances=3)
        node1_vmops = self.conn._get_vmops_for_compute_node(self.node_name)
        node2_vmops = self.conn._get_vmops_for_compute_node(self.node_name2)
        # Per-node listings see only their own VMs; the driver-level
        # listing aggregates across both nodes.
        self.assertEqual(1, len(node1_vmops.list_instances()))
        self.assertEqual(2, len(node2_vmops.list_instances()))
        self.assertEqual(3, len(self.conn.list_instances()))

    def _setup_mocks_for_session(self, mock_init):
        """Build a VC driver with a mocked session whose _create_session
        side effect installs a usable vim handle.
        """
        mock_init.return_value = None

        vcdriver = driver.VMwareVCDriver(None, False)
        vcdriver._session = mock.Mock()
        vcdriver._session.vim = None

        def side_effect():
            vcdriver._session.vim = mock.Mock()
        vcdriver._session._create_session.side_effect = side_effect
        return vcdriver
+
    def test_host_power_action(self):
        # Host power actions are not supported by the VC driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'host', 'action')

    def test_host_maintenance_mode(self):
        # Maintenance mode is not supported by the VC driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_maintenance_mode, 'host', 'mode')

    def test_set_host_enabled(self):
        # Enabling/disabling a host is not supported by the VC driver.
        self.assertRaises(NotImplementedError,
                          self.conn.set_host_enabled, 'host', 'state')

    def test_datastore_regex_configured(self):
        # Every per-node vmops must inherit the driver's datastore regex.
        for node in self.conn._resources.keys():
            self.assertEqual(self.conn._datastore_regex,
                    self.conn._resources[node]['vmops']._datastore_regex)
+
    def test_get_available_resource(self):
        stats = self.conn.get_available_resource(self.node_name)
        cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"],
                    "vendor": ["Intel", "Intel"],
                    "topology": {"cores": 16,
                                 "threads": 32}}
        # Values come from the fake cluster's capacity fixtures.
        self.assertEqual(stats['vcpus'], 32)
        self.assertEqual(stats['local_gb'], 1024)
        self.assertEqual(stats['local_gb_used'], 1024 - 500)
        self.assertEqual(stats['memory_mb'], 1000)
        self.assertEqual(stats['memory_mb_used'], 500)
        self.assertEqual(stats['hypervisor_type'], 'VMware vCenter Server')
        self.assertEqual(stats['hypervisor_version'], 5001000)
        self.assertEqual(stats['hypervisor_hostname'], self.node_name)
        # NOTE(review): comparing serialized JSON strings relies on
        # jsonutils.dumps producing a stable key order for this dict.
        self.assertEqual(stats['cpu_info'], jsonutils.dumps(cpu_info))
        self.assertEqual(stats['supported_instances'],
                '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')

    def test_invalid_datastore_regex(self):

        # Tests if we raise an exception for Invalid Regular Expression in
        # vmware_datastore_regex
        self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01',
                   group='vmware')
        self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)

    def test_get_available_nodes(self):
        # One node per configured cluster.
        nodelist = self.conn.get_available_nodes()
        self.assertEqual(len(nodelist), 2)
        self.assertIn(self.node_name, nodelist)
        self.assertIn(self.node_name2, nodelist)
+
    def test_spawn_multiple_node(self):
        """Spawn on each cluster node in turn and verify each VM runs."""

        def fake_is_neutron():
            return False

        self.stubs.Set(nova_utils, 'is_neutron', fake_is_neutron)
        uuid1 = uuidutils.generate_uuid()
        uuid2 = uuidutils.generate_uuid()
        self._create_vm(node=self.node_name, num_instances=1,
                        uuid=uuid1)
        info = self.conn.get_info({'uuid': uuid1,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
        self.conn.destroy(self.context, self.instance, self.network_info)
        self._create_vm(node=self.node_name2, num_instances=1,
                        uuid=uuid2)
        info = self.conn.get_info({'uuid': uuid2,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)

    def test_snapshot(self):
        self._create_vm()
        self._test_snapshot()

    def test_snapshot_using_file_manager(self):
        # The snapshot flow must delete the temporary vmdk/-flat.vmdk pair
        # it creates under vmware_temp.
        self._create_vm()
        uuid_str = uuidutils.generate_uuid()
        self.mox.StubOutWithMock(uuidutils,
                                 'generate_uuid')
        uuidutils.generate_uuid().AndReturn(uuid_str)

        self.mox.StubOutWithMock(ds_util, 'file_delete')
        disk_ds_path = ds_util.DatastorePath(
                self.ds, "vmware_temp", "%s.vmdk" % uuid_str)
        disk_ds_flat_path = ds_util.DatastorePath(
                self.ds, "vmware_temp", "%s-flat.vmdk" % uuid_str)
        # Check calls for delete vmdk and -flat.vmdk pair
        ds_util.file_delete(
                mox.IgnoreArg(), disk_ds_flat_path,
                mox.IgnoreArg()).AndReturn(None)
        ds_util.file_delete(
                mox.IgnoreArg(), disk_ds_path, mox.IgnoreArg()).AndReturn(None)

        self.mox.ReplayAll()
        self._test_snapshot()
+
    def test_spawn_invalid_node(self):
        # Spawning on a node the driver doesn't manage raises NotFound.
        self._create_instance(node='InvalidNodeName')
        self.assertRaises(exception.NotFound, self.conn.spawn,
                          self.context, self.instance, self.image,
                          injected_files=[], admin_password=None,
                          network_info=self.network_info,
                          block_device_info=None)

    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_with_sparse_image(self, mock_from_image):
        # Force the image metadata lookup to report a sparse, non-linked
        # clone disk and make sure spawn still produces a running VM.
        img_info = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1024,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=False)

        mock_from_image.return_value = img_info

        self._create_vm()
        info = self.conn.get_info({'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
+
    def test_plug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.plug_vifs,
                          instance=self.instance, network_info=None)

    def test_unplug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.unplug_vifs,
                          instance=self.instance, network_info=None)

    def _create_vif(self):
        """Build a neutron-style VIF model with one IPv4 and one IPv6
        subnet for the attach/detach interface tests.
        """
        gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
        dns_4 = network_model.IP(address='8.8.8.8', type=None)
        subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
                                        dns=[dns_4],
                                        gateway=gw_4,
                                        routes=None,
                                        dhcp_server='191.168.1.1')

        gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
        subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
                                        dns=None,
                                        gateway=gw_6,
                                        ips=None,
                                        routes=None)

        network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
                                                bridge=None,
                                                label=None,
                                                subnets=[subnet_4,
                                                         subnet_6],
                                                bridge_interface='eth0',
                                                vlan=99)

        vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
                                               address='ca:fe:de:ad:be:ef',
                                               network=network_neutron,
                                               type=None,
                                               devname='tap-xxx-yyy-zzz',
                                               ovs_interfaceid='aaa-bbb-ccc')
        return vif_bridge_neutron
+
    def _validate_interfaces(self, id, index, num_iface_ids):
        """Check the VM's extraConfig 'nvp.iface-id.<index>' entry.

        Asserts that the option at the given index carries the given id and
        that exactly num_iface_ids such entries exist in total.
        """
        vm = self._get_vm_record()
        found_iface_id = False
        extras = vm.get("config.extraConfig")
        key = "nvp.iface-id.%s" % index
        num_found = 0
        for c in extras.OptionValue:
            if c.key.startswith("nvp.iface-id."):
                num_found += 1
                if c.key == key and c.value == id:
                    found_iface_id = True
        self.assertTrue(found_iface_id)
        self.assertEqual(num_found, num_iface_ids)

    def _attach_interface(self, vif):
        # Index 0 is the boot-time VIF, so this one lands at index 1.
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)

    def test_attach_interface(self):
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)

    def test_attach_interface_with_exception(self):
        # A failing reconfigure task must surface as InterfaceAttachFailed.
        self._create_vm()
        vif = self._create_vif()

        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceAttachFailed,
                              self.conn.attach_interface,
                              self.instance, self.image, vif)
+
    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def _detach_interface(self, vif, mock_get_device):
        # Attach then detach; the iface-id slot is reset to 'free' rather
        # than removed, so the entry count stays at 2.
        self._create_vm()
        self._attach_interface(vif)
        self.conn.detach_interface(self.instance, vif)
        self._validate_interfaces('free', 1, 2)

    def test_detach_interface(self):
        vif = self._create_vif()
        self._detach_interface(vif)

    def test_detach_interface_and_attach(self):
        # A freed slot must be reusable by a subsequent attach.
        vif = self._create_vif()
        self._detach_interface(vif)
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)

    def test_detach_interface_no_device(self):
        # Without a matching network device, detach raises NotFound.
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)

    def test_detach_interface_no_vif_match(self):
        # A VIF id that was never attached cannot be detached.
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        vif['id'] = 'bad-id'
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)

    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def test_detach_interface_with_exception(self, mock_get_device):
        # A failing reconfigure task must surface as InterfaceDetachFailed.
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)

        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceDetachFailed,
                              self.conn.detach_interface,
                              self.instance, vif)
+
    def test_migrate_disk_and_power_off(self):
        def fake_update_instance_progress(context, instance, step,
                                          total_steps):
            pass

        def fake_get_host_ref_from_name(dest):
            return None

        self._create_vm(instance_type='m1.large')
        vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance)
        flavor = self._get_instance_type_by_name('m1.large')
        self.stubs.Set(self.conn._vmops, "_update_instance_progress",
                       fake_update_instance_progress)
        self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name",
                       fake_get_host_ref_from_name)
        self.conn.migrate_disk_and_power_off(self.context, self.instance,
                                             'fake_dest', flavor,
                                             None)
        # Migration clones the VM, so the instance must now resolve to a
        # different managed object reference than before.
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        self.assertNotEqual(vm_ref_orig.value, vm_ref.value,
                            "These should be different")

    def test_disassociate_vmref_from_instance(self):
        # Renaming the VM with a suffix disassociates it: lookups by
        # instance must fail afterwards.
        self._create_vm()
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        vm_util.disassociate_vmref_from_instance(self.conn._session,
                        self.instance, vm_ref, "-backup")
        self.assertRaises(exception.InstanceNotFound,
                    vm_util.get_vm_ref, self.conn._session, self.instance)
+
    def test_clone_vmref_for_instance(self):
        # Disassociate the original VM, clone it back under the instance
        # uuid, and verify names/refs of both the clone and the original.
        self._create_vm()
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        vm_util.disassociate_vmref_from_instance(self.conn._session,
                                                 self.instance, vm_ref,
                                                 "-backup")
        host_ref = vmwareapi_fake._get_object_refs("HostSystem")[0]
        ds_ref = vmwareapi_fake._get_object_refs("Datastore")[0]
        dc_obj = vmwareapi_fake._get_objects("Datacenter").objects[0]
        vm_util.clone_vmref_for_instance(self.conn._session, self.instance,
                                         vm_ref, host_ref, ds_ref,
                                         dc_obj.get("vmFolder"))
        self.assertIsNotNone(
                        vm_util.get_vm_ref(self.conn._session, self.instance),
                        "No VM found")
        cloned_vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        self.assertNotEqual(vm_ref.value, cloned_vm_ref.value,
                            "Reference for the cloned VM should be different")
        vm_obj = vmwareapi_fake._get_vm_mdo(vm_ref)
        cloned_vm_obj = vmwareapi_fake._get_vm_mdo(cloned_vm_ref)
        self.assertEqual(vm_obj.name, self.instance['uuid'] + "-backup",
                         "Original VM name should be with suffix -backup")
        self.assertEqual(cloned_vm_obj.name, self.instance['uuid'],
                         "VM name does not match instance['uuid']")
        # Cloning without a source vm_ref is a usage error.
        self.assertRaises(vexc.MissingParameter,
                          vm_util.clone_vmref_for_instance, self.conn._session,
                          self.instance, None, host_ref, ds_ref,
                          dc_obj.get("vmFolder"))

    def test_associate_vmref_for_instance(self):
        self._create_vm()
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        # First disassociate the VM from the instance so that we have a VM
        # to later associate using the associate_vmref_for_instance method
        vm_util.disassociate_vmref_from_instance(self.conn._session,
                                                 self.instance, vm_ref,
                                                 "-backup")
        # Ensure that the VM is indeed disassociated and that we cannot find
        # the VM using the get_vm_ref method
        self.assertRaises(exception.InstanceNotFound,
                    vm_util.get_vm_ref, self.conn._session, self.instance)
        # Associate the VM back to the instance
        vm_util.associate_vmref_for_instance(self.conn._session, self.instance,
                                             suffix="-backup")
        # Verify if we can get the VM reference
        self.assertIsNotNone(
                        vm_util.get_vm_ref(self.conn._session, self.instance),
                        "No VM found")
+
+ def test_confirm_migration(self):
+ self._create_vm()
+ self.conn.confirm_migration(self.context, self.instance, None)
+
+ def test_resize_to_smaller_disk(self):
+ self._create_vm(instance_type='m1.large')
+ flavor = self._get_instance_type_by_name('m1.small')
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.conn.migrate_disk_and_power_off, self.context,
+ self.instance, 'fake_dest', flavor, None)
+
+ def test_spawn_attach_volume_vmdk(self):
+ self._spawn_attach_volume_vmdk(vc_support=True)
+
+ def test_spawn_attach_volume_vmdk_no_image_ref(self):
+ self._spawn_attach_volume_vmdk(set_image_ref=False, vc_support=True)
+
    def test_pause(self):
        # Tests that the VMwareVCDriver does not implement the pause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
+
    def test_unpause(self):
        # Tests that the VMwareVCDriver does not implement the unpause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.unpause,
                          self.instance)
+
    def test_datastore_dc_map(self):
        """The datastore->datacenter map is lazily populated on first spawn."""
        vmops = self.conn._resources[self.node_name]['vmops']
        self.assertEqual({}, vmops._datastore_dc_mapping)
        self._create_vm()
        # currently there are 2 data stores
        self.assertEqual(2, len(vmops._datastore_dc_mapping))
+
    def test_rollback_live_migration_at_destination(self):
        # Rolling back at the destination should just destroy the instance,
        # passing through the same arguments it was given.
        with mock.patch.object(self.conn, "destroy") as mock_destroy:
            self.conn.rollback_live_migration_at_destination(self.context,
                    "instance", [], None)
            mock_destroy.assert_called_once_with(self.context,
                    "instance", [], None)
+
    def test_get_instance_disk_info_is_implemented(self):
        # Ensure that the method has been implemented in the driver
        # (i.e. it must not raise NotImplementedError; a None result is fine).
        try:
            disk_info = self.conn.get_instance_disk_info('fake_instance_name')
            self.assertIsNone(disk_info)
        except NotImplementedError:
            self.fail("test_get_instance_disk_info() should not raise "
                      "NotImplementedError")
+
    def test_destroy(self):
        """destroy() removes the VM and evicts its cached moref."""
        self._create_vm()
        info = self.conn.get_info({'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
        instances = self.conn.list_instances()
        self.assertEqual(1, len(instances))
        self.conn.destroy(self.context, self.instance, self.network_info)
        instances = self.conn.list_instances()
        self.assertEqual(0, len(instances))
        # The vm_ref cache entry must be purged on destroy.
        self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+
    def test_destroy_no_datastore(self):
        """destroy() still succeeds when the VM has no datastore path."""
        self._create_vm()
        info = self.conn.get_info({'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
        instances = self.conn.list_instances()
        self.assertEqual(1, len(instances))
        # Overwrite the vmPathName
        vm = self._get_vm_record()
        vm.set("config.files.vmPathName", None)
        self.conn.destroy(self.context, self.instance, self.network_info)
        instances = self.conn.list_instances()
        self.assertEqual(0, len(instances))
+
    def test_destroy_non_existent(self):
        # Destroying an instance that was only created in the DB (no backing
        # VM spawned) should still delegate to vmops.destroy.
        self.destroy_disks = True
        with mock.patch.object(self.conn._vmops,
                               "destroy") as mock_destroy:
            self._create_instance()
            self.conn.destroy(self.context, self.instance,
                              self.network_info,
                              None, self.destroy_disks)
            mock_destroy.assert_called_once_with(self.instance,
                                                 self.destroy_disks)
+
    def test_destroy_instance_without_compute(self):
        # An instance not managed by this compute node must be ignored:
        # vmops.destroy must NOT be called.
        self.destroy_disks = True
        with mock.patch.object(self.conn._vmops,
                               "destroy") as mock_destroy:
            self.conn.destroy(self.context, self.instance_without_compute,
                              self.network_info,
                              None, self.destroy_disks)
            self.assertFalse(mock_destroy.called)
+
    def test_get_host_uptime(self):
        # Host uptime is not exposed by the VC driver.
        self.assertRaises(NotImplementedError,
                          self.conn.get_host_uptime, 'host')
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
+ """Tests the finish_migration method on VC Driver."""
+ # setup the test instance in the database
+ self._create_vm()
+ if resize_instance:
+ self.instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(self.conn._vmops,
+ "_update_instance_progress"),
+ mock.patch.object(self.conn._session, "_wait_for_task"),
+ mock.patch.object(vm_util, "get_vm_resize_spec",
+ return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self.conn._vmops,
+ 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+ mock.patch.object(vm_util, "power_on_instance")
+ ) as (fake_call_method, fake_update_instance_progress,
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
+ self.conn.finish_migration(context=self.context,
+ migration=None,
+ instance=self.instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+ if resize_instance:
+ fake_vm_resize_spec.assert_called_once_with(
+ self.conn._session.vim.client.factory,
+ self.instance)
+ fake_call_method.assert_any_call(
+ self.conn._session.vim,
+ "ReconfigVM_Task",
+ vm_ref,
+ spec='fake-spec')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self.instance, self.instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
+ else:
+ self.assertFalse(fake_vm_resize_spec.called)
+ self.assertFalse(fake_call_method.called)
+ self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
+
+ if power_on:
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance,
+ vm_ref=vm_ref)
+ else:
+ self.assertFalse(fake_power_on.called)
+ fake_update_instance_progress.called_once_with(
+ self.context, self.instance, 4, vmops.RESIZE_TOTAL_STEPS)
+
    def test_finish_migration_power_on(self):
        # No resize; VM should be powered back on.
        self._test_finish_migration(power_on=True)
+
    def test_finish_migration_power_off(self):
        # No resize; VM should be left powered off.
        self._test_finish_migration(power_on=False)
+
    def test_finish_migration_power_on_resize(self):
        # Resize path: reconfigure + extend disk, then power on.
        self._test_finish_migration(power_on=True,
                                    resize_instance=True)
+
    @mock.patch.object(vm_util, 'associate_vmref_for_instance')
    @mock.patch.object(vm_util, 'power_on_instance')
    def _test_finish_revert_migration(self, fake_power_on,
                                      fake_associate_vmref, power_on):
        """Tests the finish_revert_migration method on VC Driver."""
        # NOTE: mock.patch decorators apply bottom-up, so the first injected
        # mock is power_on_instance and the second is
        # associate_vmref_for_instance.

        # setup the test instance in the database
        self._create_instance()
        self.conn.finish_revert_migration(self.context,
                                          instance=self.instance,
                                          network_info=None,
                                          block_device_info=None,
                                          power_on=power_on)
        # Reverting must re-associate the original VM, which was renamed
        # with the '-orig' suffix during the resize.
        fake_associate_vmref.assert_called_once_with(self.conn._session,
                                                     self.instance,
                                                     suffix='-orig')
        if power_on:
            fake_power_on.assert_called_once_with(self.conn._session,
                                                  self.instance)
        else:
            self.assertFalse(fake_power_on.called)
+
    def test_finish_revert_migration_power_on(self):
        # Revert and power the VM back on.
        self._test_finish_revert_migration(power_on=True)
+
    def test_finish_revert_migration_power_off(self):
        # Revert but leave the VM powered off.
        self._test_finish_revert_migration(power_on=False)
+
    def test_pbm_wsdl_location(self):
        # Enabling PBM should propagate the configured WSDL location into
        # the session and reset any cached PBM client handle.
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fira',
                   group='vmware')
        self.conn._update_pbm_location()
        self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
        self.assertIsNone(self.conn._session._pbm)
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
new file mode 100644
index 0000000000..6f5cf74b26
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -0,0 +1,548 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import re
+
+import mock
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+from testtools import matchers
+
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import ds_util
+
+
class DsUtilTestCase(test.NoDBTestCase):
    """Unit tests for the ds_util datastore helper functions."""

    def setUp(self):
        super(DsUtilTestCase, self).setUp()
        self.session = fake.FakeSession()
        self.flags(api_retry_count=1, group='vmware')
        fake.reset()

    def tearDown(self):
        super(DsUtilTestCase, self).tearDown()
        fake.reset()

    def test_file_delete(self):
        # file_delete must issue DeleteDatastoreFile_Task and wait on it.
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('DeleteDatastoreFile_Task', method)
            name = kwargs.get('name')
            self.assertEqual('[ds] fake/path', name)
            datacenter = kwargs.get('datacenter')
            self.assertEqual('fake-dc-ref', datacenter)
            return 'fake_delete_task'

        with contextlib.nested(
                mock.patch.object(self.session, '_wait_for_task'),
                mock.patch.object(self.session, '_call_method',
                                  fake_call_method)
        ) as (_wait_for_task, _call_method):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            ds_util.file_delete(self.session,
                                ds_path, 'fake-dc-ref')
            _wait_for_task.assert_has_calls([
                    mock.call('fake_delete_task')])

    def test_file_move(self):
        # file_move must issue MoveDatastoreFile_Task with matching
        # source/destination paths and datacenters, then wait on the task.
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('MoveDatastoreFile_Task', method)
            sourceName = kwargs.get('sourceName')
            self.assertEqual('[ds] tmp/src', sourceName)
            destinationName = kwargs.get('destinationName')
            self.assertEqual('[ds] base/dst', destinationName)
            sourceDatacenter = kwargs.get('sourceDatacenter')
            self.assertEqual('fake-dc-ref', sourceDatacenter)
            destinationDatacenter = kwargs.get('destinationDatacenter')
            self.assertEqual('fake-dc-ref', destinationDatacenter)
            return 'fake_move_task'

        with contextlib.nested(
                mock.patch.object(self.session, '_wait_for_task'),
                mock.patch.object(self.session, '_call_method',
                                  fake_call_method)
        ) as (_wait_for_task, _call_method):
            src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
            dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
            ds_util.file_move(self.session,
                              'fake-dc-ref', src_ds_path, dst_ds_path)
            _wait_for_task.assert_has_calls([
                    mock.call('fake_move_task')])

    def test_mkdir(self):
        # mkdir is synchronous (no task) and must request parent creation.
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('MakeDirectory', method)
            name = kwargs.get('name')
            self.assertEqual('[ds] fake/path', name)
            datacenter = kwargs.get('datacenter')
            self.assertEqual('fake-dc-ref', datacenter)
            createParentDirectories = kwargs.get('createParentDirectories')
            self.assertTrue(createParentDirectories)

        with mock.patch.object(self.session, '_call_method',
                               fake_call_method):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')

    def test_file_exists(self):
        # A search result containing the file maps to True.
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'SearchDatastore_Task':
                ds_browser = args[0]
                self.assertEqual('fake-browser', ds_browser)
                datastorePath = kwargs.get('datastorePath')
                self.assertEqual('[ds] fake/path', datastorePath)
                return 'fake_exists_task'

            # Should never get here
            self.fail()

        def fake_wait_for_task(task_ref):
            if task_ref == 'fake_exists_task':
                result_file = fake.DataObject()
                result_file.path = 'fake-file'

                result = fake.DataObject()
                result.file = [result_file]
                result.path = '[ds] fake/path'

                task_info = fake.DataObject()
                task_info.result = result

                return task_info

            # Should never get here
            self.fail()

        with contextlib.nested(
                mock.patch.object(self.session, '_call_method',
                                  fake_call_method),
                mock.patch.object(self.session, '_wait_for_task',
                                  fake_wait_for_task)):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            file_exists = ds_util.file_exists(self.session,
                    'fake-browser', ds_path, 'fake-file')
            self.assertTrue(file_exists)

    def test_file_exists_fails(self):
        # A FileNotFoundException raised by the search task maps to False.
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'SearchDatastore_Task':
                return 'fake_exists_task'

            # Should never get here
            self.fail()

        def fake_wait_for_task(task_ref):
            if task_ref == 'fake_exists_task':
                raise vexc.FileNotFoundException()

            # Should never get here
            self.fail()

        with contextlib.nested(
                mock.patch.object(self.session, '_call_method',
                                  fake_call_method),
                mock.patch.object(self.session, '_wait_for_task',
                                  fake_wait_for_task)):
            ds_path = ds_util.DatastorePath('ds', 'fake/path')
            file_exists = ds_util.file_exists(self.session,
                    'fake-browser', ds_path, 'fake-file')
            self.assertFalse(file_exists)

    def _mock_get_datastore_calls(self, *datastores):
        """Mock vim_util calls made by get_datastore."""

        datastores_i = [None]

        # For the moment, at least, this list of datastores is simply passed to
        # get_properties_for_a_collection_of_objects, which we mock below. We
        # don't need to over-complicate the fake function by worrying about its
        # contents.
        fake_ds_list = ['fake-ds']

        def fake_call_method(module, method, *args, **kwargs):
            # Mock the call which returns a list of datastores for the cluster
            if (module == ds_util.vim_util and
                    method == 'get_dynamic_property' and
                    args == ('fake-cluster', 'ClusterComputeResource',
                             'datastore')):
                fake_ds_mor = fake.DataObject()
                fake_ds_mor.ManagedObjectReference = fake_ds_list
                return fake_ds_mor

            # Return the datastore result sets we were passed in, in the order
            # given
            if (module == ds_util.vim_util and
                    method == 'get_properties_for_a_collection_of_objects' and
                    args[0] == 'Datastore' and
                    args[1] == fake_ds_list):
                # Start a new iterator over given datastores
                datastores_i[0] = iter(datastores)
                return datastores_i[0].next()

            # Continue returning results from the current iterator.
            # (Python 2 iterator protocol, consistent with the file's era.)
            if (module == ds_util.vim_util and
                    method == 'continue_to_get_objects'):
                try:
                    return datastores_i[0].next()
                except StopIteration:
                    return None

            # Sentinel that get_datastore's use of vim has changed
            self.fail('Unexpected vim call in get_datastore: %s' % method)

        return mock.patch.object(self.session, '_call_method',
                                 side_effect=fake_call_method)

    def test_get_datastore(self):
        # With no regex the first accessible, non-maintenance datastore wins.
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore())
        fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
                                               False, "normal"))
        fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
                                               True, "inMaintenance"))

        with self._mock_get_datastore_calls(fake_objects):
            result = ds_util.get_datastore(self.session, 'fake-cluster')
            self.assertEqual("fake-ds", result.name)
            self.assertEqual(units.Ti, result.capacity)
            self.assertEqual(500 * units.Gi, result.freespace)

    def test_get_datastore_with_regex(self):
        # Test with a regex that matches with a datastore
        # NOTE: raw string -- "\d" inside a plain literal is an invalid
        # escape sequence (SyntaxWarning on modern Python); the value is
        # unchanged.
        datastore_valid_regex = re.compile(r"^openstack.*\d$")
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore("openstack-ds0"))
        fake_objects.add_object(fake.Datastore("fake-ds0"))
        fake_objects.add_object(fake.Datastore("fake-ds1"))

        with self._mock_get_datastore_calls(fake_objects):
            result = ds_util.get_datastore(self.session, 'fake-cluster',
                                           datastore_valid_regex)
            self.assertEqual("openstack-ds0", result.name)

    def test_get_datastore_with_token(self):
        # Paged results (continue_to_get_objects) must also be considered.
        # NOTE: raw string for the same reason as above.
        regex = re.compile(r"^ds.*\d$")
        fake0 = fake.FakeRetrieveResult()
        fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
        fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
        setattr(fake0, 'token', 'token-0')
        fake1 = fake.FakeRetrieveResult()
        fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
        fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))

        with self._mock_get_datastore_calls(fake0, fake1):
            result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
            self.assertEqual("ds2", result.name)

    def test_get_datastore_with_list(self):
        # Test with a regex containing whitelist of datastores
        datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore("openstack-ds0"))
        fake_objects.add_object(fake.Datastore("openstack-ds1"))
        fake_objects.add_object(fake.Datastore("openstack-ds2"))

        with self._mock_get_datastore_calls(fake_objects):
            result = ds_util.get_datastore(self.session, 'fake-cluster',
                                           datastore_valid_regex)
            self.assertNotEqual("openstack-ds1", result.name)

    def test_get_datastore_with_regex_error(self):
        # Test with a regex that has no match
        # Checks if code raises DatastoreNotFound with a specific message
        datastore_invalid_regex = re.compile("unknown-ds")
        exp_message = (_("Datastore regex %s did not match any datastores")
                       % datastore_invalid_regex.pattern)
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.Datastore("fake-ds0"))
        fake_objects.add_object(fake.Datastore("fake-ds1"))
        # assertRaisesRegExp would have been a good choice instead of
        # try/catch block, but it's available only from Py 2.7.
        try:
            with self._mock_get_datastore_calls(fake_objects):
                ds_util.get_datastore(self.session, 'fake-cluster',
                                      datastore_invalid_regex)
        except exception.DatastoreNotFound as e:
            self.assertEqual(exp_message, e.args[0])
        else:
            self.fail("DatastoreNotFound Exception was not raised with "
                      "message: %s" % exp_message)

    def test_get_datastore_without_datastore(self):
        # No datastores returned at all -> DatastoreNotFound.
        self.assertRaises(exception.DatastoreNotFound,
                ds_util.get_datastore,
                fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")

    def test_get_datastore_inaccessible_ds(self):
        # An inaccessible datastore must never be selected.
        data_store = fake.Datastore()
        data_store.set("summary.accessible", False)

        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(data_store)

        with self._mock_get_datastore_calls(fake_objects):
            self.assertRaises(exception.DatastoreNotFound,
                              ds_util.get_datastore,
                              self.session, 'fake-cluster')

    def test_get_datastore_ds_in_maintenance(self):
        # A datastore in maintenance mode must never be selected.
        data_store = fake.Datastore()
        data_store.set("summary.maintenanceMode", "inMaintenance")

        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(data_store)

        with self._mock_get_datastore_calls(fake_objects):
            self.assertRaises(exception.DatastoreNotFound,
                              ds_util.get_datastore,
                              self.session, 'fake-cluster')

    def test_get_datastore_no_host_in_cluster(self):
        # An empty property result (cluster with no hosts) raises too.
        def fake_call_method(module, method, *args, **kwargs):
            return ''

        with mock.patch.object(self.session, '_call_method',
                               fake_call_method):
            self.assertRaises(exception.DatastoreNotFound,
                              ds_util.get_datastore,
                              self.session, 'fake-cluster')

    def _test_is_datastore_valid(self, accessible=True,
                                 maintenance_mode="normal",
                                 type="VMFS",
                                 datastore_regex=None):
        # Helper building the propdict that _is_datastore_valid inspects.
        propdict = {}
        propdict["summary.accessible"] = accessible
        propdict["summary.maintenanceMode"] = maintenance_mode
        propdict["summary.type"] = type
        propdict["summary.name"] = "ds-1"

        return ds_util._is_datastore_valid(propdict, datastore_regex)

    def test_is_datastore_valid(self):
        for ds_type in ds_util.ALLOWED_DATASTORE_TYPES:
            self.assertTrue(self._test_is_datastore_valid(True,
                                                          "normal",
                                                          ds_type))

    def test_is_datastore_valid_inaccessible_ds(self):
        self.assertFalse(self._test_is_datastore_valid(False,
                                                      "normal",
                                                      "VMFS"))

    def test_is_datastore_valid_ds_in_maintenance(self):
        self.assertFalse(self._test_is_datastore_valid(True,
                                                      "inMaintenance",
                                                      "VMFS"))

    def test_is_datastore_valid_ds_type_invalid(self):
        self.assertFalse(self._test_is_datastore_valid(True,
                                                      "normal",
                                                      "vfat"))

    def test_is_datastore_valid_not_matching_regex(self):
        datastore_regex = re.compile("ds-2")
        self.assertFalse(self._test_is_datastore_valid(True,
                                                      "normal",
                                                      "VMFS",
                                                      datastore_regex))

    def test_is_datastore_valid_matching_regex(self):
        datastore_regex = re.compile("ds-1")
        self.assertTrue(self._test_is_datastore_valid(True,
                                                      "normal",
                                                      "VMFS",
                                                      datastore_regex))
+
+
class DatastoreTestCase(test.NoDBTestCase):
    """Unit tests for the ds_util.Datastore value object."""

    def test_ds(self):
        # A fully specified datastore exposes all four attributes.
        store = ds_util.Datastore(
            "fake_ref", "ds_name", 2 * units.Gi, 1 * units.Gi)
        self.assertEqual('ds_name', store.name)
        self.assertEqual('fake_ref', store.ref)
        self.assertEqual(2 * units.Gi, store.capacity)
        self.assertEqual(1 * units.Gi, store.freespace)

    def test_ds_invalid_space(self):
        # freespace must not exceed capacity, and capacity must not be
        # None when freespace is supplied.
        for capacity in (1 * units.Gi, None):
            self.assertRaises(ValueError, ds_util.Datastore,
                              "fake_ref", "ds_name", capacity, 2 * units.Gi)

    def test_ds_no_capacity_no_freespace(self):
        # Space figures are optional; both default to None.
        store = ds_util.Datastore("fake_ref", "ds_name")
        self.assertIsNone(store.capacity)
        self.assertIsNone(store.freespace)

    def test_ds_invalid(self):
        # Both the moref and the datastore name are mandatory.
        for ref, name in ((None, "ds_name"), ("fake_ref", None)):
            self.assertRaises(ValueError, ds_util.Datastore, ref, name)

    def test_build_path(self):
        # build_path produces a DatastorePath rooted at this datastore.
        store = ds_util.Datastore("fake_ref", "ds_name")
        path = store.build_path("some_dir", "foo.vmdk")
        self.assertEqual('[ds_name] some_dir/foo.vmdk', str(path))
+
+
class DatastorePathTestCase(test.NoDBTestCase):
    """Unit tests for the ds_util.DatastorePath value object."""

    def test_ds_path(self):
        # All accessors of a fully specified path.
        p = ds_util.DatastorePath('dsname', 'a/b/c', 'file.iso')
        self.assertEqual('[dsname] a/b/c/file.iso', str(p))
        self.assertEqual('a/b/c/file.iso', p.rel_path)
        self.assertEqual('a/b/c', p.parent.rel_path)
        self.assertEqual('[dsname] a/b/c', str(p.parent))
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('file.iso', p.basename)
        self.assertEqual('a/b/c', p.dirname)

    def test_ds_path_no_ds_name(self):
        # The datastore name is mandatory.
        bad_args = [
                ('', ['a/b/c', 'file.iso']),
                (None, ['a/b/c', 'file.iso'])]
        for t in bad_args:
            self.assertRaises(
                ValueError, ds_util.DatastorePath,
                t[0], *t[1])

    def test_ds_path_invalid_path_components(self):
        # None is rejected anywhere in the path components.
        bad_args = [
            ('dsname', [None]),
            ('dsname', ['', None]),
            ('dsname', ['a', None]),
            ('dsname', ['a', None, 'b']),
            ('dsname', [None, '']),
            ('dsname', [None, 'b'])]

        for t in bad_args:
            self.assertRaises(
                ValueError, ds_util.DatastorePath,
                t[0], *t[1])

    def test_ds_path_no_subdir(self):
        # A file directly under the datastore root has an empty dirname.
        args = [
            ('dsname', ['', 'x.vmdk']),
            ('dsname', ['x.vmdk'])]

        canonical_p = ds_util.DatastorePath('dsname', 'x.vmdk')
        self.assertEqual('[dsname] x.vmdk', str(canonical_p))
        self.assertEqual('', canonical_p.dirname)
        self.assertEqual('x.vmdk', canonical_p.basename)
        self.assertEqual('x.vmdk', canonical_p.rel_path)
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertEqual(str(canonical_p), str(p))

    def test_ds_path_ds_only(self):
        # A path with no components refers to the datastore itself.
        args = [
            ('dsname', []),
            ('dsname', ['']),
            ('dsname', ['', ''])]

        canonical_p = ds_util.DatastorePath('dsname')
        self.assertEqual('[dsname]', str(canonical_p))
        self.assertEqual('', canonical_p.rel_path)
        self.assertEqual('', canonical_p.basename)
        self.assertEqual('', canonical_p.dirname)
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertEqual(str(canonical_p), str(p))
            self.assertEqual(canonical_p.rel_path, p.rel_path)

    def test_ds_path_equivalence(self):
        # Different separator placements normalize to the same path.
        args = [
            ('dsname', ['a/b/c/', 'x.vmdk']),
            ('dsname', ['a/', 'b/c/', 'x.vmdk']),
            ('dsname', ['a', 'b', 'c', 'x.vmdk']),
            ('dsname', ['a/b/c', 'x.vmdk'])]

        canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertEqual(str(canonical_p), str(p))
            self.assertEqual(canonical_p.datastore, p.datastore)
            self.assertEqual(canonical_p.rel_path, p.rel_path)
            self.assertEqual(str(canonical_p.parent), str(p.parent))

    def test_ds_path_non_equivalence(self):
        # Leading slashes and stray whitespace are significant, not trimmed.
        args = [
            # leading slash
            ('dsname', ['/a', 'b', 'c', 'x.vmdk']),
            ('dsname', ['/a/b/c/', 'x.vmdk']),
            ('dsname', ['a/b/c', '/x.vmdk']),
            # leading space
            ('dsname', ['a/b/c/', ' x.vmdk']),
            ('dsname', ['a/', ' b/c/', 'x.vmdk']),
            ('dsname', [' a', 'b', 'c', 'x.vmdk']),
            # trailing space
            ('dsname', ['/a/b/c/', 'x.vmdk ']),
            ('dsname', ['a/b/c/ ', 'x.vmdk'])]

        canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
        for t in args:
            p = ds_util.DatastorePath(t[0], *t[1])
            self.assertNotEqual(str(canonical_p), str(p))

    def test_ds_path_hashable(self):
        ds1 = ds_util.DatastorePath('dsname', 'path')
        ds2 = ds_util.DatastorePath('dsname', 'path')

        # If the above objects have the same hash, they will only be added to
        # the set once
        self.assertThat(set([ds1, ds2]), matchers.HasLength(1))

    def test_equal(self):
        # Equality is by value, not identity.
        a = ds_util.DatastorePath('ds_name', 'a')
        b = ds_util.DatastorePath('ds_name', 'a')
        self.assertEqual(a, b)

    def test_join(self):
        # join() appends components; joining nothing returns an equal path.
        p = ds_util.DatastorePath('ds_name', 'a')
        ds_path = p.join('b')
        self.assertEqual('[ds_name] a/b', str(ds_path))

        p = ds_util.DatastorePath('ds_name', 'a')
        ds_path = p.join()
        self.assertEqual('[ds_name] a', str(ds_path))

        # None components are rejected just as in the constructor.
        bad_args = [
            [None],
            ['', None],
            ['a', None],
            ['a', None, 'b']]
        for arg in bad_args:
            self.assertRaises(ValueError, p.join, *arg)

    def test_ds_path_parse(self):
        # parse() splits '[dsname] rel/path' strings back into parts.
        p = ds_util.DatastorePath.parse('[dsname]')
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('', p.rel_path)

        p = ds_util.DatastorePath.parse('[dsname] folder')
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('folder', p.rel_path)

        p = ds_util.DatastorePath.parse('[dsname] folder/file')
        self.assertEqual('dsname', p.datastore)
        self.assertEqual('folder/file', p.rel_path)

        for p in [None, '']:
            self.assertRaises(ValueError, ds_util.DatastorePath.parse, p)

        # Strings without the '[...]' prefix raise IndexError.
        for p in ['bad path', '/a/b/c', 'a/b/c']:
            self.assertRaises(IndexError, ds_util.DatastorePath.parse, p)
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py b/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
new file mode 100644
index 0000000000..1351530143
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
@@ -0,0 +1,163 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import re
+
+from oslo.utils import units
+
+from nova import test
+from nova.virt.vmwareapi import ds_util
+
# Lightweight stand-ins for the suds/vim result objects consumed by
# ds_util._select_datastore.
ResultSet = collections.namedtuple('ResultSet', ['objects'])
ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('ManagedObjectReference', ['value'])
+
+
class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase):
    """Tests for ds_util._select_datastore filtering and best-match logic."""

    def setUp(self):
        super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp()
        # Rows: type, name, accessible, maintenance mode, capacity, freespace.
        self.data = [
            ['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789],
            ['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
            ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
            ['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789],
            ['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789]
        ]

    def build_result_set(self, mock_data, name_list=None):
        # datastores will have a moref_id of ds-000 and
        # so on based on their index in the mock_data list
        # NOTE: the index variable was renamed from ``id`` (which shadowed
        # the builtin) and the inner loop now uses the enumerated ``value``
        # directly instead of redundantly re-indexing the row.
        if name_list is None:
            name_list = self.propset_name_list

        objects = []
        for idx, row in enumerate(mock_data):
            obj = ObjectContent(
                obj=MoRef(value="ds-%03d" % idx),
                propSet=[])
            for index, value in enumerate(row):
                obj.propSet.append(
                    DynamicProperty(name=name_list[index], val=value))
            objects.append(obj)
        return ResultSet(objects=objects)

    @property
    def propset_name_list(self):
        # Property names matching the column order of self.data rows.
        return ['summary.type', 'summary.name', 'summary.accessible',
                'summary.maintenanceMode', 'summary.capacity',
                'summary.freeSpace']

    def test_filter_datastores_simple(self):
        # With no regex, the accessible non-maintenance datastore with the
        # most free space wins.
        datastores = self.build_result_set(self.data)
        best_match = ds_util.Datastore(ref='fake_ref', name='ds',
                                       capacity=0, freespace=0)
        rec = ds_util._select_datastore(datastores, best_match)

        self.assertIsNotNone(rec.ref, "could not find datastore!")
        self.assertEqual('ds-001', rec.ref.value,
                         "didn't find the right datastore!")
        self.assertEqual(123467890, rec.freespace,
                         "did not obtain correct freespace!")

    def test_filter_datastores_empty(self):
        # An empty result set leaves the incoming best match untouched.
        data = []
        datastores = self.build_result_set(data)

        best_match = ds_util.Datastore(ref='fake_ref', name='ds',
                                       capacity=0, freespace=0)
        rec = ds_util._select_datastore(datastores, best_match)

        self.assertEqual(rec, best_match)

    def test_filter_datastores_no_match(self):
        # A regex matching nothing also leaves the best match untouched.
        datastores = self.build_result_set(self.data)
        datastore_regex = re.compile('no_match.*')

        best_match = ds_util.Datastore(ref='fake_ref', name='ds',
                                       capacity=0, freespace=0)
        rec = ds_util._select_datastore(datastores,
                                        best_match,
                                        datastore_regex)

        self.assertEqual(rec, best_match, "did not match datastore properly")

    def test_filter_datastores_specific_match(self):

        data = [
            ['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678],
            ['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
            ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
            ['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789],
            ['VMFS', 'some-other-good', False, 'normal', 987654321000,
             12346789000],
            ['VMFS', 'new-name', True, 'inMaintenance', 987654321000,
             12346789000]
        ]
        # only the DS some-name-good is accessible and matches the regex
        datastores = self.build_result_set(data)
        datastore_regex = re.compile('.*-good$')

        best_match = ds_util.Datastore(ref='fake_ref', name='ds',
                                       capacity=0, freespace=0)
        rec = ds_util._select_datastore(datastores,
                                        best_match,
                                        datastore_regex)

        self.assertIsNotNone(rec, "could not find datastore!")
        self.assertEqual('ds-003', rec.ref.value,
                         "didn't find the right datastore!")
        self.assertNotEqual('ds-004', rec.ref.value,
                            "accepted an unreachable datastore!")
        self.assertEqual('some-name-good', rec.name)
        self.assertEqual(12346789, rec.freespace,
                         "did not obtain correct freespace!")
        self.assertEqual(987654321, rec.capacity,
                         "did not obtain correct capacity!")

    def test_filter_datastores_missing_props(self):
        data = [
            ['VMFS', 'os-some-name', 987654321, 1234678],
            ['NFS', 'another-name', 9876543210, 123467890],
        ]
        # no matches are expected when 'summary.accessible' is missing
        prop_names = ['summary.type', 'summary.name',
                      'summary.capacity', 'summary.freeSpace']
        datastores = self.build_result_set(data, prop_names)
        best_match = ds_util.Datastore(ref='fake_ref', name='ds',
                                       capacity=0, freespace=0)

        rec = ds_util._select_datastore(datastores, best_match)
        self.assertEqual(rec, best_match, "no matches were expected")

    def test_filter_datastores_best_match(self):
        data = [
            ['VMFS', 'spam-good', True, 20 * units.Gi, 10 * units.Gi],
            ['NFS', 'eggs-good', True, 40 * units.Gi, 15 * units.Gi],
            ['BAD', 'some-name-bad', True, 30 * units.Gi, 20 * units.Gi],
            ['VMFS', 'some-name-good', True, 50 * units.Gi, 5 * units.Gi],
            ['VMFS', 'some-other-good', True, 10 * units.Gi, 10 * units.Gi],
        ]

        datastores = self.build_result_set(data)
        datastore_regex = re.compile('.*-good$')

        # the current best match is better than all candidates
        best_match = ds_util.Datastore(ref='ds-100', name='best-ds-good',
            capacity=20 * units.Gi, freespace=19 * units.Gi)
        rec = ds_util._select_datastore(datastores,
                                        best_match,
                                        datastore_regex)
        self.assertEqual(rec, best_match, "did not match datastore properly")
diff --git a/nova/tests/unit/virt/vmwareapi/test_imagecache.py b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
new file mode 100644
index 0000000000..d277963106
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import datetime
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vmops
+
+CONF = cfg.CONF
+
+
+class ImageCacheManagerTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(ImageCacheManagerTestCase, self).setUp()
+ self._session = mock.Mock(name='session')
+ self._imagecache = imagecache.ImageCacheManager(self._session,
+ 'fake-base-folder')
+ self._time = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ self._file_name = 'ts-2012-11-22-12-00-00'
+ fake.reset()
+
+ def tearDown(self):
+ super(ImageCacheManagerTestCase, self).tearDown()
+ fake.reset()
+
+ def test_timestamp_cleanup(self):
+ def fake_get_timestamp(ds_browser, ds_path):
+ self.assertEqual('fake-ds-browser', ds_browser)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
+ if not self.exists:
+ return
+ ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
+ timeutils.strtime(at=self._time,
+ fmt=imagecache.TIMESTAMP_FORMAT))
+ return ts
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_get_timestamp',
+ fake_get_timestamp),
+ mock.patch.object(ds_util, 'file_delete')
+ ) as (_get_timestamp, _file_delete):
+ self.exists = False
+ self._imagecache.timestamp_cleanup(
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(0, _file_delete.call_count)
+ self.exists = True
+ self._imagecache.timestamp_cleanup(
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ expected_ds_path = ds_util.DatastorePath(
+ 'fake-ds', 'fake-path', self._file_name)
+ _file_delete.assert_called_once_with(self._session,
+ expected_ds_path, 'fake-dc-ref')
+
+ def test_get_timestamp(self):
+ def fake_get_sub_folders(session, ds_browser, ds_path):
+ self.assertEqual('fake-ds-browser', ds_browser)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
+ if self.exists:
+ files = set()
+ files.add(self._file_name)
+ return files
+
+ with contextlib.nested(
+ mock.patch.object(ds_util, 'get_sub_folders',
+ fake_get_sub_folders)
+ ):
+ self.exists = True
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(self._file_name, ts)
+ self.exists = False
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertIsNone(ts)
+
+ def test_get_timestamp_filename(self):
+ timeutils.set_time_override(override_time=self._time)
+ fn = self._imagecache._get_timestamp_filename()
+ self.assertEqual(self._file_name, fn)
+
+ def test_get_datetime_from_filename(self):
+ t = self._imagecache._get_datetime_from_filename(self._file_name)
+ self.assertEqual(self._time, t)
+
+ def test_get_ds_browser(self):
+ cache = self._imagecache._ds_browser
+ ds_browser = mock.Mock()
+ moref = fake.ManagedObjectReference('datastore-100')
+ self.assertIsNone(cache.get(moref.value))
+ mock_get_method = mock.Mock(return_value=ds_browser)
+ with mock.patch.object(vim_util, 'get_dynamic_property',
+ mock_get_method):
+ ret = self._imagecache._get_ds_browser(moref)
+ mock_get_method.assert_called_once_with(mock.ANY, moref,
+ 'Datastore', 'browser')
+ self.assertIs(ds_browser, ret)
+ self.assertIs(ds_browser, cache.get(moref.value))
+
+ def test_list_base_images(self):
+ def fake_get_dynamic_property(vim, mobj, type, property_name):
+ return 'fake-ds-browser'
+
+ def fake_get_sub_folders(session, ds_browser, ds_path):
+ files = set()
+ files.add('image-ref-uuid')
+ return files
+
+ with contextlib.nested(
+ mock.patch.object(vim_util, 'get_dynamic_property',
+ fake_get_dynamic_property),
+ mock.patch.object(ds_util, 'get_sub_folders',
+ fake_get_sub_folders)
+ ) as (_get_dynamic, _get_sub_folders):
+ fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref')
+ datastore = ds_util.Datastore(name='ds', ref=fake_ds_ref)
+ ds_path = datastore.build_path('base_folder')
+ images = self._imagecache._list_datastore_images(
+ ds_path, datastore)
+ originals = set()
+ originals.add('image-ref-uuid')
+ self.assertEqual({'originals': originals,
+ 'unexplained_images': []},
+ images)
+
+ @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get')
+ @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup')
+ @mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser')
+ def test_enlist_image(self,
+ mock_get_ds_browser,
+ mock_timestamp_cleanup,
+ mock_timestamp_folder_get):
+ image_id = "fake_image_id"
+ dc_ref = "fake_dc_ref"
+ fake_ds_ref = mock.Mock()
+ ds = ds_util.Datastore(
+ ref=fake_ds_ref, name='fake_ds',
+ capacity=1,
+ freespace=1)
+
+ ds_browser = mock.Mock()
+ mock_get_ds_browser.return_value = ds_browser
+ timestamp_folder_path = mock.Mock()
+ mock_timestamp_folder_get.return_value = timestamp_folder_path
+
+ self._imagecache.enlist_image(image_id, ds, dc_ref)
+
+ cache_root_folder = ds.build_path("fake-base-folder")
+ mock_get_ds_browser.assert_called_once_with(
+ ds.ref)
+ mock_timestamp_folder_get.assert_called_once_with(
+ cache_root_folder, "fake_image_id")
+ mock_timestamp_cleanup.assert_called_once_with(
+ dc_ref, ds_browser, timestamp_folder_path)
+
+ def test_age_cached_images(self):
+ def fake_get_ds_browser(ds_ref):
+ return 'fake-ds-browser'
+
+ def fake_get_timestamp(ds_browser, ds_path):
+ self._get_timestamp_called += 1
+ path = str(ds_path)
+ if path == '[fake-ds] fake-path/fake-image-1':
+ # No time stamp exists
+ return
+ if path == '[fake-ds] fake-path/fake-image-2':
+ # Timestamp that will be valid => no deletion
+ return 'ts-2012-11-22-10-00-00'
+ if path == '[fake-ds] fake-path/fake-image-3':
+ # Timestamp that will be invalid => deletion
+ return 'ts-2012-11-20-12-00-00'
+ self.fail()
+
+ def fake_mkdir(session, ts_path, dc_ref):
+ self.assertEqual(
+ '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
+ str(ts_path))
+
+ def fake_file_delete(session, ds_path, dc_ref):
+ self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
+
+ def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
+ self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_get_ds_browser',
+ fake_get_ds_browser),
+ mock.patch.object(self._imagecache, '_get_timestamp',
+ fake_get_timestamp),
+ mock.patch.object(ds_util, 'mkdir',
+ fake_mkdir),
+ mock.patch.object(ds_util, 'file_delete',
+ fake_file_delete),
+ mock.patch.object(self._imagecache, 'timestamp_cleanup',
+ fake_timestamp_cleanup),
+ ) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
+ _timestamp_cleanup):
+ timeutils.set_time_override(override_time=self._time)
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
+ dc_info = vmops.DcInfo(ref='dc_ref', name='name',
+ vmFolder='vmFolder')
+ self._get_timestamp_called = 0
+ self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
+ 'fake-image-3', 'fake-image-4'])
+ self._imagecache.used_images = set(['fake-image-4'])
+ self._imagecache._age_cached_images(
+ 'fake-context', datastore, dc_info,
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(3, self._get_timestamp_called)
+
+ def test_update(self):
+ def fake_list_datastore_images(ds_path, datastore):
+ return {'unexplained_images': [],
+ 'originals': self.images}
+
+ def fake_age_cached_images(context, datastore,
+ dc_info, ds_path):
+ self.assertEqual('[ds] fake-base-folder', str(ds_path))
+ self.assertEqual(self.images,
+ self._imagecache.used_images)
+ self.assertEqual(self.images,
+ self._imagecache.originals)
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_list_datastore_images',
+ fake_list_datastore_images),
+ mock.patch.object(self._imagecache,
+ '_age_cached_images',
+ fake_age_cached_images)
+ ) as (_list_base, _age_and_verify):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'inst-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'name': 'inst-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+ self.images = set(['1', '2'])
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
+ dc_info = vmops.DcInfo(ref='dc_ref', name='name',
+ vmFolder='vmFolder')
+ datastores_info = [(datastore, dc_info)]
+ self._imagecache.update('context', all_instances, datastores_info)
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
new file mode 100644
index 0000000000..07fc3be214
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2014 VMware, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for images.
+"""
+
+import contextlib
+
+import mock
+from oslo.utils import units
+
+from nova import exception
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import read_write_util
+
+
+class VMwareImagesTestCase(test.NoDBTestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ def test_fetch_image(self):
+ """Test fetching images."""
+
+ dc_name = 'fake-dc'
+ file_path = 'fake_file'
+ ds_name = 'ds1'
+ host = mock.MagicMock()
+ context = mock.MagicMock()
+
+ image_data = {
+ 'id': nova.tests.unit.image.fake.get_valid_image_id(),
+ 'disk_format': 'vmdk',
+ 'size': 512,
+ }
+ read_file_handle = mock.MagicMock()
+ write_file_handle = mock.MagicMock()
+ read_iter = mock.MagicMock()
+ instance = {}
+ instance['image_ref'] = image_data['id']
+ instance['uuid'] = 'fake-uuid'
+
+ def fake_read_handle(read_iter):
+ return read_file_handle
+
+ def fake_write_handle(host, dc_name, ds_name, cookies,
+ file_path, file_size):
+ return write_file_handle
+
+ with contextlib.nested(
+ mock.patch.object(read_write_util, 'GlanceFileRead',
+ side_effect=fake_read_handle),
+ mock.patch.object(read_write_util, 'VMwareHTTPWriteFile',
+ side_effect=fake_write_handle),
+ mock.patch.object(images, 'start_transfer'),
+ mock.patch.object(images.IMAGE_API, 'get',
+ return_value=image_data),
+ mock.patch.object(images.IMAGE_API, 'download',
+ return_value=read_iter),
+ ) as (glance_read, http_write, start_transfer, image_show,
+ image_download):
+ images.fetch_image(context, instance,
+ host, dc_name,
+ ds_name, file_path)
+
+ glance_read.assert_called_once_with(read_iter)
+ http_write.assert_called_once_with(host, dc_name, ds_name, None,
+ file_path, image_data['size'])
+ start_transfer.assert_called_once_with(
+ context, read_file_handle,
+ image_data['size'],
+ write_file_handle=write_file_handle)
+ image_download.assert_called_once_with(context, instance['image_ref'])
+ image_show.assert_called_once_with(context, instance['image_ref'])
+
+ def _setup_mock_get_remote_image_service(self,
+ mock_get_remote_image_service,
+ metadata):
+ mock_image_service = mock.MagicMock()
+ mock_image_service.show.return_value = metadata
+ mock_get_remote_image_service.return_value = [mock_image_service, 'i']
+
+ def test_from_image_with_image_ref(self):
+ raw_disk_size_in_gb = 83
+ raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': 'vmdk',
+ 'properties': {
+ "vmware_ostype": constants.DEFAULT_OS_TYPE,
+ "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
+ "vmware_disktype": constants.DEFAULT_DISK_TYPE,
+ "hw_vif_model": constants.DEFAULT_VIF_MODEL,
+ images.LINKED_CLONE_PROPERTY: True}}
+
+ img_props = images.VMwareImage.from_image(image_id, mdata)
+
+ image_size_in_kb = raw_disk_size_in_bytes / units.Ki
+
+ # assert that defaults are set and no value returned is left empty
+ self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type)
+ self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
+ img_props.adapter_type)
+ self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type)
+ self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model)
+ self.assertTrue(img_props.linked_clone)
+ self.assertEqual(image_size_in_kb, img_props.file_size_in_kb)
+
+ def _image_build(self, image_lc_setting, global_lc_setting,
+ disk_format=constants.DEFAULT_DISK_FORMAT,
+ os_type=constants.DEFAULT_OS_TYPE,
+ adapter_type=constants.DEFAULT_ADAPTER_TYPE,
+ disk_type=constants.DEFAULT_DISK_TYPE,
+ vif_model=constants.DEFAULT_VIF_MODEL):
+ self.flags(use_linked_clone=global_lc_setting, group='vmware')
+ raw_disk_size_in_gb = 93
+ raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': disk_format,
+ 'properties': {
+ "vmware_ostype": os_type,
+ "vmware_adaptertype": adapter_type,
+ "vmware_disktype": disk_type,
+ "hw_vif_model": vif_model}}
+
+ if image_lc_setting is not None:
+ mdata['properties'][
+ images.LINKED_CLONE_PROPERTY] = image_lc_setting
+
+ return images.VMwareImage.from_image(image_id, mdata)
+
+ def test_use_linked_clone_override_nf(self):
+ image_props = self._image_build(None, False)
+ self.assertFalse(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_nt(self):
+ image_props = self._image_build(None, True)
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ny(self):
+ image_props = self._image_build(None, "yes")
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ft(self):
+ image_props = self._image_build(False, True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_nt(self):
+ image_props = self._image_build("no", True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_yf(self):
+ image_props = self._image_build("yes", False)
+ self.assertTrue(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_disk_format_none(self):
+ image = self._image_build(None, True, disk_format=None)
+ self.assertIsNone(image.file_type)
+ self.assertFalse(image.is_iso)
+
+ def test_use_disk_format_iso(self):
+ image = self._image_build(None, True, disk_format='iso')
+ self.assertEqual('iso', image.file_type)
+ self.assertTrue(image.is_iso)
+
+ def test_use_bad_disk_format(self):
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._image_build,
+ None,
+ True,
+ disk_format='bad_disk_format')
+
+ def test_image_no_defaults(self):
+ image = self._image_build(False, False,
+ disk_format='iso',
+ os_type='fake-os-type',
+ adapter_type='fake-adapter-type',
+ disk_type='fake-disk-type',
+ vif_model='fake-vif-model')
+ self.assertEqual('iso', image.file_type)
+ self.assertEqual('fake-os-type', image.os_type)
+ self.assertEqual('fake-adapter-type', image.adapter_type)
+ self.assertEqual('fake-disk-type', image.disk_type)
+ self.assertEqual('fake-vif-model', image.vif_model)
+ self.assertFalse(image.linked_clone)
+
+ def test_image_defaults(self):
+ image = images.VMwareImage(image_id='fake-image-id')
+
+ # N.B. We intentionally don't use the defined constants here. Amongst
+ # other potential failures, we're interested in changes to their
+ # values, which would not otherwise be picked up.
+ self.assertEqual('otherGuest', image.os_type)
+ self.assertEqual('lsiLogic', image.adapter_type)
+ self.assertEqual('preallocated', image.disk_type)
+ self.assertEqual('e1000', image.vif_model)
diff --git a/nova/tests/unit/virt/vmwareapi/test_io_util.py b/nova/tests/unit/virt/vmwareapi/test_io_util.py
new file mode 100644
index 0000000000..a03c1e95b5
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_io_util.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.vmwareapi import io_util
+
+
+@mock.patch.object(io_util, 'IMAGE_API')
+class GlanceWriteThreadTestCase(test.NoDBTestCase):
+
+ def test_start_image_update_service_exception(self, mocked):
+ mocked.update.side_effect = exception.ImageNotAuthorized(
+ image_id='image')
+ write_thread = io_util.GlanceWriteThread(
+ None, None, image_id=None)
+ write_thread.start()
+ self.assertRaises(exception.ImageNotAuthorized, write_thread.wait)
+ write_thread.stop()
+ write_thread.close()
diff --git a/nova/tests/unit/virt/vmwareapi/test_read_write_util.py b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
new file mode 100644
index 0000000000..468d8b213a
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
@@ -0,0 +1,39 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib
+
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.vmwareapi import read_write_util
+
+CONF = cfg.CONF
+
+
+class ReadWriteUtilTestCase(test.NoDBTestCase):
+ def test_ipv6_host(self):
+ ipv6_host = 'fd8c:215d:178e:c51e:200:c9ff:fed1:584c'
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'endheaders')
+ httplib.HTTPConnection.endheaders()
+ self.mox.ReplayAll()
+ file = read_write_util.VMwareHTTPWriteFile(ipv6_host,
+ 'fake_dc',
+ 'fake_ds',
+ dict(),
+ '/tmp/fake.txt',
+ 0)
+ self.assertEqual(ipv6_host, file.conn.host)
+ self.assertEqual(443, file.conn.port)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
new file mode 100644
index 0000000000..2a4d086c36
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -0,0 +1,346 @@
+# Copyright 2013 Canonical Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+from oslo.vmware import exceptions as vexc
+
+from nova import exception
+from nova.network import model as network_model
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import network_util
+from nova.virt.vmwareapi import vif
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+CONF = cfg.CONF
+
+
+class VMwareVifTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVifTestCase, self).setUp()
+ self.flags(vlan_interface='vmnet0', group='vmware')
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ vlan=3,
+ bridge_interface='eth0',
+ injected=True)
+
+ self.vif = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])[0]
+ self.session = fake.FakeSession()
+ self.cluster = None
+
+ def tearDown(self):
+ super(VMwareVifTestCase, self).tearDown()
+
+ def test_ensure_vlan_bridge(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(None)
+ network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
+ self.cluster).AndReturn('vmnet0')
+ network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
+ self.cluster).AndReturn(True)
+ network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
+ self.cluster)
+ network_util.get_network_with_the_name(self.session, 'fa0', None)
+
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
+
+ # FlatDHCP network mode without vlan - network doesn't exist with the host
+ def test_ensure_vlan_bridge_without_vlan(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(None)
+ network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
+ self.cluster).AndReturn('vmnet0')
+ network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
+ self.cluster).AndReturn(True)
+ network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
+ self.cluster)
+ network_util.get_network_with_the_name(self.session, 'fa0', None)
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
+
+ # FlatDHCP network mode without vlan - network exists with the host
+ # Get vswitch and check vlan interface should not be called
+ def test_ensure_vlan_bridge_with_network(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+ vm_network = {'name': 'VM Network', 'type': 'Network'}
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(vm_network)
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
+
+ # Flat network mode with DVS
+ def test_ensure_vlan_bridge_with_existing_dvs(self):
+ network_ref = {'dvpg': 'dvportgroup-2062',
+ 'type': 'DistributedVirtualPortgroup'}
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(network_ref)
+ self.mox.ReplayAll()
+ ref = vif.ensure_vlan_bridge(self.session,
+ self.vif,
+ create_vlan=False)
+ self.assertThat(ref, matchers.DictMatches(network_ref))
+
+ def test_get_network_ref_neutron(self):
+ self.mox.StubOutWithMock(vif, 'get_neutron_network')
+ vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
+ self.mox.ReplayAll()
+ vif.get_network_ref(self.session, self.cluster, self.vif, True)
+
+ def test_get_network_ref_flat_dhcp(self):
+ self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
+ vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
+ create_vlan=False)
+ self.mox.ReplayAll()
+ vif.get_network_ref(self.session, self.cluster, self.vif, False)
+
+ def test_get_network_ref_bridge(self):
+ self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
+ vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
+ create_vlan=True)
+ self.mox.ReplayAll()
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ vlan=3,
+ bridge_interface='eth0',
+ injected=True,
+ should_create_vlan=True)
+ self.vif = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])[0]
+ vif.get_network_ref(self.session, self.cluster, self.vif, False)
+
+ def test_get_network_ref_bridge_from_opaque(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id',
+ 'opaqueNetworkName': 'name',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertEqual('bridge_id', network_ref['network-id'])
+
+ def test_get_network_ref_multiple_bridges_from_opaque(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'bridge_id2',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id2')
+ self.assertEqual('bridge_id2', network_ref['network-id'])
+
+ def test_get_network_ref_integration(self):
+ opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
+ 'opaqueNetworkName': 'name',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertEqual('integration_bridge', network_ref['network-id'])
+
+ def test_get_network_ref_bridge_none(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'bridge_id2',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertIsNone(network_ref)
+
+ def test_get_network_ref_integration_multiple(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'integration_bridge',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertIsNone(network_ref)
+
+ def test_get_neutron_network(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(opaque)
+ vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
+ CONF.vmware.integration_bridge,
+ self.vif['network']['id']).AndReturn('fake-network-ref')
+ self.mox.ReplayAll()
+ network_ref = vif.get_neutron_network(self.session,
+ self.vif['network']['id'],
+ self.cluster,
+ self.vif)
+ self.assertEqual(network_ref, 'fake-network-ref')
+
+ def test_get_neutron_network_opaque_network_not_found(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(opaque)
+ vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
+ CONF.vmware.integration_bridge,
+ self.vif['network']['id']).AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NetworkNotFoundForBridge,
+ vif.get_neutron_network, self.session,
+ self.vif['network']['id'], self.cluster, self.vif)
+
+ def test_get_neutron_network_bridge_network_not_found(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(None)
+ network_util.get_network_with_the_name(self.session, 0,
+ self.cluster).AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NetworkNotFoundForBridge,
+ vif.get_neutron_network, self.session,
+ self.vif['network']['id'], self.cluster, self.vif)
+
+ def test_create_port_group_already_exists(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'AddPortGroup':
+ raise vexc.AlreadyExistsException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_add_vswitch, _get_host, _call_method):
+ network_util.create_port_group(self.session, 'pg_name',
+ 'vswitch_name', vlan_id=0,
+ cluster=None)
+
+ def test_create_port_group_exception(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'AddPortGroup':
+ raise vexc.VMwareDriverException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_add_vswitch, _get_host, _call_method):
+ self.assertRaises(vexc.VMwareDriverException,
+ network_util.create_port_group,
+ self.session, 'pg_name',
+ 'vswitch_name', vlan_id=0,
+ cluster=None)
+
+ def test_get_neutron_network_invalid_property(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'get_dynamic_property':
+ raise vexc.InvalidPropertyException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(network_util, 'get_network_with_the_name')
+ ) as (_get_host, _call_method, _get_name):
+ vif.get_neutron_network(self.session, 'network_name',
+ 'cluster', self.vif)
+
+ def test_get_vif_info_none(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', None)
+ self.assertEqual([], vif_info)
+
+ def test_get_vif_info_empty_list(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', [])
+ self.assertEqual([], vif_info)
+
+ @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
+ def test_get_vif_info(self, mock_get_network_ref):
+ network_info = utils.get_test_network_info()
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', network_info)
+ expected = [{'iface_id': 'vif-xxx-yyy-zzz',
+ 'mac_address': 'fake',
+ 'network_name': 'fake',
+ 'network_ref': 'fake_ref',
+ 'vif_model': 'fake_model'}]
+ self.assertEqual(expected, vif_info)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vim_util.py b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
new file mode 100644
index 0000000000..d00e127b66
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2013 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+import fixtures
+import mock
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import vim_util
+
+
+def _fake_get_object_properties(vim, collector, mobj,
+ type, properties):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ObjectContent(None))
+ return fake_objects
+
+
+def _fake_get_object_properties_missing(vim, collector, mobj,
+ type, properties):
+ fake_objects = fake.FakeRetrieveResult()
+ ml = [fake.MissingProperty()]
+ fake_objects.add_object(fake.ObjectContent(None, missing_list=ml))
+ return fake_objects
+
+
+class VMwareVIMUtilTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VMwareVIMUtilTestCase, self).setUp()
+ fake.reset()
+ self.vim = fake.FakeVim()
+ self.vim._login()
+
+ def test_get_dynamic_properties_missing(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.vmwareapi.vim_util.get_object_properties',
+ _fake_get_object_properties))
+ res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertIsNone(res)
+
+ def test_get_dynamic_properties_missing_path_exists(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.vmwareapi.vim_util.get_object_properties',
+ _fake_get_object_properties_missing))
+ res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertIsNone(res)
+
+ def test_get_dynamic_properties_with_token(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+
+ # Add a token to our results, indicating that more are available
+ result = fake.FakeRetrieveResult(token='fake_token')
+
+ # We expect these properties to be returned
+ result.add_object(ObjectContent(propSet=[
+ DynamicProperty(name='name1', val='value1'),
+ DynamicProperty(name='name2', val='value2')
+ ]))
+
+ # These properties should be ignored
+ result.add_object(ObjectContent(propSet=[
+ DynamicProperty(name='name3', val='value3')
+ ]))
+
+ retrievePropertiesEx = mock.MagicMock(name='RetrievePropertiesEx')
+ retrievePropertiesEx.return_value = result
+
+ calls = {'RetrievePropertiesEx': retrievePropertiesEx}
+ with stubs.fake_suds_context(calls):
+ session = driver.VMwareAPISession(host_ip='localhost')
+
+ service_content = session.vim.service_content
+ props = session._call_method(vim_util, "get_dynamic_properties",
+ service_content.propertyCollector,
+ 'fake_type', None)
+
+ self.assertEqual(props, {
+ 'name1': 'value1',
+ 'name2': 'value2'
+ })
+
+ @mock.patch.object(vim_util, 'get_object_properties', return_value=None)
+ def test_get_dynamic_properties_no_objects(self, mock_get_object_props):
+ res = vim_util.get_dynamic_properties('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertEqual({}, res)
+
+ def test_get_inner_objects(self):
+ property = ['summary.name']
+ # Get the fake datastores directly from the cluster
+ cluster_refs = fake._get_object_refs('ClusterComputeResource')
+ cluster = fake._get_object(cluster_refs[0])
+ expected_ds = cluster.datastore.ManagedObjectReference
+ # Get the fake datastores using inner objects utility method
+ result = vim_util.get_inner_objects(
+ self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
+ datastores = [oc.obj for oc in result.objects]
+ self.assertEqual(expected_ds, datastores)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
new file mode 100644
index 0000000000..906d03cf66
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -0,0 +1,1069 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 Canonical Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import contextlib
+import re
+
+import mock
+from oslo.vmware import exceptions as vexc
+
+from nova import context
+from nova import exception
+from nova.network import model as network_model
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import vm_util
+
+
+class partialObject(object):
+ def __init__(self, path='fake-path'):
+ self.path = path
+ self.fault = fake.DataObject()
+
+
+class VMwareVMUtilTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVMUtilTestCase, self).setUp()
+ fake.reset()
+ stubs.set_stubs(self.stubs)
+ vm_util.vm_refs_cache_reset()
+
+ def _test_get_stats_from_cluster(self, connection_state="connected",
+ maintenance_mode=False):
+ ManagedObjectRefs = [fake.ManagedObjectReference("host1",
+ "HostSystem"),
+ fake.ManagedObjectReference("host2",
+ "HostSystem")]
+ hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
+ respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
+ prop_dict = {'host': hosts, 'resourcePool': respool}
+
+ hardware = fake.DataObject()
+ hardware.numCpuCores = 8
+ hardware.numCpuThreads = 16
+ hardware.vendor = "Intel"
+ hardware.cpuModel = "Intel(R) Xeon(R)"
+
+ runtime_host_1 = fake.DataObject()
+ runtime_host_1.connectionState = "connected"
+ runtime_host_1.inMaintenanceMode = False
+
+ runtime_host_2 = fake.DataObject()
+ runtime_host_2.connectionState = connection_state
+ runtime_host_2.inMaintenanceMode = maintenance_mode
+
+ prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
+ fake.Prop(name="runtime_summary",
+ val=runtime_host_1)]
+ prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
+ fake.Prop(name="runtime_summary",
+ val=runtime_host_2)]
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ObjectContent("prop_list_host1",
+ prop_list_host_1))
+ fake_objects.add_object(fake.ObjectContent("prop_list_host1",
+ prop_list_host_2))
+
+ respool_resource_usage = fake.DataObject()
+ respool_resource_usage.maxUsage = 5368709120
+ respool_resource_usage.overallUsage = 2147483648
+
+ def fake_call_method(*args):
+ if "get_dynamic_properties" in args:
+ return prop_dict
+ elif "get_properties_for_a_collection_of_objects" in args:
+ return fake_objects
+ else:
+ return respool_resource_usage
+
+ session = fake.FakeSession()
+ with mock.patch.object(session, '_call_method', fake_call_method):
+ result = vm_util.get_stats_from_cluster(session, "cluster1")
+ cpu_info = {}
+ mem_info = {}
+ if connection_state == "connected" and not maintenance_mode:
+ cpu_info['vcpus'] = 32
+ cpu_info['cores'] = 16
+ cpu_info['vendor'] = ["Intel", "Intel"]
+ cpu_info['model'] = ["Intel(R) Xeon(R)",
+ "Intel(R) Xeon(R)"]
+ else:
+ cpu_info['vcpus'] = 16
+ cpu_info['cores'] = 8
+ cpu_info['vendor'] = ["Intel"]
+ cpu_info['model'] = ["Intel(R) Xeon(R)"]
+ mem_info['total'] = 5120
+ mem_info['free'] = 3072
+ expected_stats = {'cpu': cpu_info, 'mem': mem_info}
+ self.assertEqual(expected_stats, result)
+
+ def test_get_stats_from_cluster_hosts_connected_and_active(self):
+ self._test_get_stats_from_cluster()
+
+ def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
+ self._test_get_stats_from_cluster(connection_state="disconnected")
+
+ def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
+ self._test_get_stats_from_cluster(maintenance_mode=True)
+
+ def test_get_host_ref_no_hosts_in_cluster(self):
+ self.assertRaises(exception.NoValidHost,
+ vm_util.get_host_ref,
+ fake.FakeObjectRetrievalSession(""), 'fake_cluster')
+
+ def test_get_resize_spec(self):
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
+ fake_instance)
+ expected = """{'memoryMB': 2048,
+ 'numCPUs': 2,
+ 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_cdrom_attach_config_spec(self):
+
+ result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
+ fake.Datastore(),
+ "/tmp/foo.iso",
+ 200, 0)
+ expected = """{
+ 'deviceChange': [
+ {
+ 'device': {
+ 'connectable': {
+ 'allowGuestControl': False,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0: VirtualDeviceConnectInfo'
+ },
+ 'backing': {
+ 'datastore': {
+ "summary.maintenanceMode": "normal",
+ "summary.type": "VMFS",
+ "summary.accessible":true,
+ "summary.name": "fake-ds",
+ "summary.capacity": 1099511627776,
+ "summary.freeSpace": 536870912000,
+ "browser": ""
+ },
+ 'fileName': '/tmp/foo.iso',
+ 'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
+ },
+ 'controllerKey': 200,
+ 'unitNumber': 0,
+ 'key': -1,
+ 'obj_name': 'ns0: VirtualCdrom'
+ },
+ 'operation': 'add',
+ 'obj_name': 'ns0: VirtualDeviceConfigSpec'
+ }
+ ],
+ 'obj_name': 'ns0: VirtualMachineConfigSpec'
+}
+"""
+
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_lsilogic_controller_spec(self):
+ # Test controller spec returned for lsiLogic sas adapter type
+ config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
+ adapter_type="lsiLogicsas")
+ self.assertEqual("ns0:VirtualLsiLogicSASController",
+ config_spec.device.obj_name)
+
+ def test_paravirtual_controller_spec(self):
+ # Test controller spec returned for paraVirtual adapter type
+ config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
+ adapter_type="paraVirtual")
+ self.assertEqual("ns0:ParaVirtualSCSIController",
+ config_spec.device.obj_name)
+
+ def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
+ # Test the adapter_type returned for a lsiLogic sas controller
+ controller_key = 1000
+ disk = fake.VirtualDisk()
+ disk.controllerKey = controller_key
+ disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
+ disk_backing.fileName = filename
+ if parent:
+ disk_backing.parent = parent
+ disk.backing = disk_backing
+ controller = fake.VirtualLsiLogicSASController()
+ controller.key = controller_key
+ devices = [disk, controller]
+ return devices
+
+ def test_get_vmdk_path(self):
+ uuid = '00000000-0000-0000-0000-000000000000'
+ filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ session = fake.FakeSession()
+
+ with mock.patch.object(session, '_call_method',
+ return_value=devices):
+ instance = {'uuid': uuid}
+ vmdk_path = vm_util.get_vmdk_path(session, None, instance)
+ self.assertEqual(filename, vmdk_path)
+
+ def test_get_vmdk_path_and_adapter_type(self):
+ filename = '[test_datastore] test_file.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(vmdk_info[0], filename)
+
+ def test_get_vmdk_path_and_adapter_type_with_match(self):
+ n_filename = '[test_datastore] uuid/uuid.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(n_filename, vmdk_info[0])
+
+ def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
+ n_filename = '[test_datastore] diuu/diuu.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertIsNone(vmdk_info[0])
+
+ def test_get_vmdk_adapter_type(self):
+ # Test for the adapter_type to be used in vmdk descriptor
+ # Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic
+ # and ParaVirtual
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
+ self.assertEqual("dummyAdapter", vmdk_adapter_type)
+
+ def test_find_allocated_slots(self):
+ disk1 = fake.VirtualDisk(200, 0)
+ disk2 = fake.VirtualDisk(200, 1)
+ disk3 = fake.VirtualDisk(201, 1)
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
+ devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
+ taken = vm_util._find_allocated_slots(devices)
+ self.assertEqual([0, 1], sorted(taken[200]))
+ self.assertEqual([1], taken[201])
+ self.assertEqual([7], taken[1000])
+
+ def test_allocate_controller_key_and_unit_number_ide_default(self):
+ # Test that default IDE controllers are used when there is a free slot
+ # on them
+ disk1 = fake.VirtualDisk(200, 0)
+ disk2 = fake.VirtualDisk(200, 1)
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ devices = [disk1, disk2, ide0, ide1]
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ None,
+ devices,
+ 'ide')
+ self.assertEqual(201, controller_key)
+ self.assertEqual(0, unit_number)
+ self.assertIsNone(controller_spec)
+
+ def test_allocate_controller_key_and_unit_number_ide(self):
+ # Test that a new controller is created when there is no free slot on
+ # the default IDE controllers
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ devices = [ide0, ide1]
+ for controller_key in [200, 201]:
+ for unit_number in [0, 1]:
+ disk = fake.VirtualDisk(controller_key, unit_number)
+ devices.append(disk)
+ factory = fake.FakeFactory()
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ factory,
+ devices,
+ 'ide')
+ self.assertEqual(-101, controller_key)
+ self.assertEqual(0, unit_number)
+ self.assertIsNotNone(controller_spec)
+
+ def test_allocate_controller_key_and_unit_number_scsi(self):
+ # Test that we allocate on existing SCSI controller if there is a free
+ # slot on it
+ devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
+ for unit_number in range(7):
+ disk = fake.VirtualDisk(1000, unit_number)
+ devices.append(disk)
+ factory = fake.FakeFactory()
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ factory,
+ devices,
+ 'lsiLogic')
+ self.assertEqual(1000, controller_key)
+ self.assertEqual(8, unit_number)
+ self.assertIsNone(controller_spec)
+
+ def _test_get_vnc_config_spec(self, port):
+
+ result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
+ port)
+ return result
+
+ def test_get_vnc_config_spec(self):
+ result = self._test_get_vnc_config_spec(7)
+ expected = """{'extraConfig': [
+ {'value': 'true',
+ 'key': 'RemoteDisplay.vnc.enabled',
+ 'obj_name': 'ns0:OptionValue'},
+ {'value': 7,
+ 'key': 'RemoteDisplay.vnc.port',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def _create_fake_vms(self):
+ fake_vms = fake.FakeRetrieveResult()
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ for i in range(10):
+ vm = fake.ManagedObject()
+ opt_val = OptionValue(key='', value=5900 + i)
+ vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
+ fake_vms.add_object(vm)
+ return fake_vms
+
+ def test_get_vnc_port(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10000, group='vmware')
+ actual = vm_util.get_vnc_port(
+ fake.FakeObjectRetrievalSession(fake_vms))
+ self.assertEqual(actual, 5910)
+
+ def test_get_vnc_port_exhausted(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10, group='vmware')
+ self.assertRaises(exception.ConsolePortRangeExhausted,
+ vm_util.get_vnc_port,
+ fake.FakeObjectRetrievalSession(fake_vms))
+
+ def test_get_all_cluster_refs_by_name_none(self):
+ fake_objects = fake.FakeRetrieveResult()
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
+ self.assertEqual({}, refs)
+
+ def test_get_all_cluster_refs_by_name_exists(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
+ self.assertEqual(1, len(refs))
+
+ def test_get_all_cluster_refs_by_name_missing(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(partialObject(path='cluster'))
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
+ self.assertEqual({}, refs)
+
+ def test_propset_dict_simple(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+
+ object = ObjectContent(propSet=[
+ DynamicProperty(name='foo', val="bar")])
+ propdict = vm_util.propset_dict(object.propSet)
+ self.assertEqual("bar", propdict['foo'])
+
+ def test_propset_dict_complex(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+ MoRef = collections.namedtuple('Val', ['value'])
+
+ object = ObjectContent(propSet=[
+ DynamicProperty(name='foo', val="bar"),
+ DynamicProperty(name='some.thing',
+ val=MoRef(value='else')),
+ DynamicProperty(name='another.thing', val='value')])
+
+ propdict = vm_util.propset_dict(object.propSet)
+ self.assertEqual("bar", propdict['foo'])
+ self.assertTrue(hasattr(propdict['some.thing'], 'value'))
+ self.assertEqual("else", propdict['some.thing'].value)
+ self.assertEqual("value", propdict['another.thing'])
+
+ def _test_detach_virtual_disk_spec(self, destroy_disk=False):
+ virtual_device_config = vm_util.detach_virtual_disk_spec(
+ fake.FakeFactory(),
+ 'fake_device',
+ destroy_disk)
+ self.assertEqual('remove', virtual_device_config.operation)
+ self.assertEqual('fake_device', virtual_device_config.device)
+ self.assertEqual('ns0:VirtualDeviceConfigSpec',
+ virtual_device_config.obj_name)
+ if destroy_disk:
+ self.assertEqual('destroy', virtual_device_config.fileOperation)
+ else:
+ self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
+
+ def test_detach_virtual_disk_spec(self):
+ self._test_detach_virtual_disk_spec(destroy_disk=False)
+
+ def test_detach_virtual_disk_destroy_spec(self):
+ self._test_detach_virtual_disk_spec(destroy_disk=True)
+
+ def test_get_vm_create_spec(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [])
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_allocations(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations={'cpu_limit': 7,
+ 'cpu_reservation': 6})
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'reservation': 6,
+ 'limit': 7,
+ 'obj_name': 'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_limit(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations={'cpu_limit': 7})
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'limit': 7,
+ 'obj_name': 'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_share(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ shares = {'cpu_shares_level': 'high'}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations=shares)
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'shares': {'level': 'high',
+ 'shares': 0,
+ 'obj_name':'ns0:SharesInfo'},
+ 'obj_name':'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_share_custom(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ shares = {'cpu_shares_level': 'custom',
+ 'cpu_shares_share': 1948}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations=shares)
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'shares': {'level': 'custom',
+ 'shares': 1948,
+ 'obj_name':'ns0:SharesInfo'},
+ 'obj_name':'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_create_vm(self):
+
+ method_list = ['CreateVM_Task', 'get_dynamic_property']
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_method = method_list.pop(0)
+ self.assertEqual(expected_method, method)
+ if (expected_method == 'CreateVM_Task'):
+ return 'fake_create_vm_task'
+ elif (expected_method == 'get_dynamic_property'):
+ task_info = mock.Mock(state="success", result="fake_vm_ref")
+ return task_info
+ else:
+ self.fail('Should not get here....')
+
+ def fake_wait_for_task(self, *args):
+ task_info = mock.Mock(state="success", result="fake_vm_ref")
+ return task_info
+
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ fake_call_mock = mock.Mock(side_effect=fake_call_method)
+ fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
+ with contextlib.nested(
+ mock.patch.object(session, '_wait_for_task',
+ fake_wait_mock),
+ mock.patch.object(session, '_call_method',
+ fake_call_mock)
+ ) as (wait_for_task, call_method):
+ vm_ref = vm_util.create_vm(
+ session,
+ fake_instance,
+ 'fake_vm_folder',
+ 'fake_config_spec',
+ 'fake_res_pool_ref')
+ self.assertEqual('fake_vm_ref', vm_ref)
+
+ call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
+ 'fake_vm_folder', config='fake_config_spec',
+ pool='fake_res_pool_ref')
+ wait_for_task.assert_called_once_with('fake_create_vm_task')
+
+ @mock.patch.object(vm_util.LOG, 'warning')
+ def test_create_vm_invalid_guestid(self, mock_log_warn):
+ """Ensure we warn when create_vm() fails after we passed an
+ unrecognised guestId
+ """
+
+ found = [False]
+
+ def fake_log_warn(msg, values):
+ if not isinstance(values, dict):
+ return
+ if values.get('ostype') == 'invalid_os_type':
+ found[0] = True
+ mock_log_warn.side_effect = fake_log_warn
+
+ instance_values = {'id': 7, 'name': 'fake-name',
+ 'uuid': uuidutils.generate_uuid(),
+ 'vcpus': 2, 'memory_mb': 2048}
+ instance = fake_instance.fake_instance_obj(
+ context.RequestContext('fake', 'fake', is_admin=False),
+ **instance_values)
+
+ session = driver.VMwareAPISession()
+
+ config_spec = vm_util.get_vm_create_spec(
+ session.vim.client.factory,
+ instance, instance.name, 'fake-datastore', [],
+ os_type='invalid_os_type')
+
+ self.assertRaises(vexc.VMwareDriverException,
+ vm_util.create_vm, session, instance, 'folder',
+ config_spec, 'res-pool')
+ self.assertTrue(found[0])
+
+ def test_convert_vif_model(self):
+ expected = "VirtualE1000"
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
+ self.assertEqual(expected, result)
+ expected = "VirtualE1000e"
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
+ self.assertEqual(expected, result)
+ types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
+ "VirtualVmxnet"]
+ for type in types:
+ self.assertEqual(type,
+ vm_util.convert_vif_model(type))
+ self.assertRaises(exception.Invalid,
+ vm_util.convert_vif_model,
+ "InvalidVifModel")
+
+ def test_power_on_instance_with_vm_ref(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_without_vm_ref(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(vm_util, "get_vm_ref",
+ return_value='fake-vm-ref'),
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance)
+ fake_get_vm_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_with_exception(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task",
+ side_effect=exception.NovaException('fake')),
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_on_instance,
+ session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_with_power_state_exception(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(
+ session, "_wait_for_task",
+ side_effect=vexc.InvalidPowerStateException),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_create_virtual_disk(self):
+ session = fake.FakeSession()
+ dm = session.vim.service_content.virtualDiskManager
+ with contextlib.nested(
+ mock.patch.object(vm_util, "get_vmdk_create_spec",
+ return_value='fake-spec'),
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_get_spec, fake_call_method, fake_wait_for_task):
+ vm_util.create_virtual_disk(session, 'fake-dc-ref',
+ 'fake-adapter-type', 'fake-disk-type',
+ 'fake-path', 7)
+ fake_get_spec.assert_called_once_with(
+ session.vim.client.factory, 7,
+ 'fake-adapter-type',
+ 'fake-disk-type')
+ fake_call_method.assert_called_once_with(
+ session.vim,
+ "CreateVirtualDisk_Task",
+ dm,
+ name='fake-path',
+ datacenter='fake-dc-ref',
+ spec='fake-spec')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_copy_virtual_disk(self):
+ session = fake.FakeSession()
+ dm = session.vim.service_content.virtualDiskManager
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.copy_virtual_disk(session, 'fake-dc-ref',
+ 'fake-source', 'fake-dest')
+ fake_call_method.assert_called_once_with(
+ session.vim,
+ "CopyVirtualDisk_Task",
+ dm,
+ sourceName='fake-source',
+ sourceDatacenter='fake-dc-ref',
+ destName='fake-dest')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def _create_fake_vm_objects(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.VirtualMachine())
+ return fake_objects
+
+ def test_get_values(self):
+ objects = self._create_fake_vm_objects()
+ query = vm_util.get_values_from_object_properties(
+ fake.FakeObjectRetrievalSession(objects), objects)
+ self.assertEqual('poweredOn', query['runtime.powerState'])
+ self.assertEqual('guestToolsRunning',
+ query['summary.guest.toolsRunningStatus'])
+ self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
+
+ def test_reconfigure_vm(self):
+ session = fake.FakeSession()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake_reconfigure_task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (_call_method, _wait_for_task):
+ vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
+ _call_method.assert_called_once_with(mock.ANY,
+ 'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
+ _wait_for_task.assert_called_once_with(
+ 'fake_reconfigure_task')
+
+ def test_get_network_attach_config_spec_opaque(self):
+ vif_info = {'network_name': 'br-int',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'OpaqueNetwork',
+ 'network-id': 'fake-network-id',
+ 'network-type': 'opaque'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name':'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {
+ 'macAddress':'00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl':True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name':'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'opaqueNetworkType': 'opaque',
+ 'opaqueNetworkId': 'fake-network-id',
+ 'obj_name': '%(card)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_attach_config_spec_dvs(self):
+ vif_info = {'network_name': 'br100',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'DistributedVirtualPortgroup',
+ 'dvsw': 'fake-network-id',
+ 'dvpg': 'fake-group'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ port = 'ns0:DistributedVirtualSwitchPortConnection'
+ backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {'macAddress': '00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl': True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'port': {
+ 'portgroupKey': 'fake-group',
+ 'switchUuid': 'fake-network-id',
+ 'obj_name': '%(obj_name_port)s'},
+ 'obj_name': '%(obj_name_backing)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
+ 'obj_name_backing': backing,
+ 'obj_name_port': port}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_detach_config_spec(self):
+ result = vm_util.get_network_detach_config_spec(
+ fake.FakeFactory(), 'fake-device', 2)
+ expected = """{
+ 'extraConfig': [{'value': 'free',
+ 'key': 'nvp.iface-id.2',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [{'device': 'fake-device',
+ 'operation': 'remove',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
+ def test_power_off_instance_no_vm_ref(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance)
+ fake_get_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_with_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task',
+ side_effect=exception.NovaException('fake'))
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_off_instance,
+ session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_power_state_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(
+ session, '_wait_for_task',
+ side_effect=vexc.InvalidPowerStateException)
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+
+@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
+ # N.B. Mocking on the class only mocks test_*(), but we need
+ # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
+ # setUp causes object initialisation to fail. Not mocking in tests results
+ # in vim calls not using FakeVim.
+ @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ def setUp(self):
+ super(VMwareVMUtilGetHostRefTestCase, self).setUp()
+ fake.reset()
+ vm_util.vm_refs_cache_reset()
+
+ self.session = driver.VMwareAPISession()
+
+ # Create a fake VirtualMachine running on a known host
+ self.host_ref = fake._db_content['HostSystem'].keys()[0]
+ self.vm_ref = fake.create_vm(host_ref=self.host_ref)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ def test_get_host_ref_for_vm(self, mock_get_vm_ref):
+ mock_get_vm_ref.return_value = self.vm_ref
+
+ ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
+
+ mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
+ self.assertEqual(self.host_ref, ret)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ def test_get_host_name_for_vm(self, mock_get_vm_ref):
+ mock_get_vm_ref.return_value = self.vm_ref
+
+ host = fake._get_object(self.host_ref)
+
+ ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
+
+ mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
+ self.assertEqual(host.name, ret)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
new file mode 100644
index 0000000000..e70f4661b0
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -0,0 +1,1293 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import contextlib
+
+import mock
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+
+from nova.compute import power_state
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+
+
+class DsPathMatcher:
+ def __init__(self, expected_ds_path_str):
+ self.expected_ds_path_str = expected_ds_path_str
+
+ def __eq__(self, ds_path_param):
+ return str(ds_path_param) == self.expected_ds_path_str
+
+
+class VMwareVMOpsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVMOpsTestCase, self).setUp()
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ self.flags(image_cache_subdirectory_name='vmware_base',
+ my_ip='',
+ flat_injected=True,
+ vnc_enabled=True)
+ self._context = context.RequestContext('fake_user', 'fake_project')
+ self._session = driver.VMwareAPISession()
+
+ self._virtapi = mock.Mock()
+ self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None)
+
+ self._image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ self._instance_values = {
+ 'name': 'fake_name',
+ 'uuid': 'fake_uuid',
+ 'vcpus': 1,
+ 'memory_mb': 512,
+ 'image_ref': self._image_id,
+ 'root_gb': 10,
+ 'node': 'respool-1001(MyResPoolName)',
+ 'expected_attrs': ['system_metadata'],
+ }
+ self._instance = fake_instance.fake_instance_obj(
+ self._context, **self._instance_values)
+
+ fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
+ self._ds = ds_util.Datastore(
+ ref=fake_ds_ref, name='fake_ds',
+ capacity=10 * units.Gi,
+ freespace=10 * units.Gi)
+ self._dc_info = vmops.DcInfo(
+ ref='fake_dc_ref', name='fake_dc',
+ vmFolder='fake_vm_folder')
+
+ subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
+ dns=[network_model.IP('192.168.0.1')],
+ gateway=
+ network_model.IP('192.168.0.1'),
+ ips=[
+ network_model.IP('192.168.0.100')],
+ routes=None)
+ subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
+ dns=None,
+ gateway=
+ network_model.IP('dead:beef::1'),
+ ips=[network_model.IP(
+ 'dead:beef::dcad:beff:feef:0')],
+ routes=None)
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ subnets=[subnet_4, subnet_6],
+ vlan=None,
+ bridge_interface=None,
+ injected=True)
+ self._network_values = {
+ 'id': None,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'network': network,
+ 'type': None,
+ 'devname': None,
+ 'ovs_interfaceid': None,
+ 'rxtx_cap': 3
+ }
+ self.network_info = network_model.NetworkInfo([
+ network_model.VIF(**self._network_values)
+ ])
+ pure_IPv6_network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ subnets=[subnet_6],
+ vlan=None,
+ bridge_interface=None,
+ injected=True)
+ self.pure_IPv6_network_info = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=pure_IPv6_network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])
+
+ def test_get_machine_id_str(self):
+ result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
+ self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
+ '192.168.0.1;192.168.0.255;192.168.0.1#', result)
+ result = vmops.VMwareVMOps._get_machine_id_str(
+ self.pure_IPv6_network_info)
+ self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
+
+ def _setup_create_folder_mocks(self):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ base_name = 'folder'
+ ds_name = "datastore"
+ ds_ref = mock.Mock()
+ ds_ref.value = 1
+ dc_ref = mock.Mock()
+ ops._datastore_dc_mapping[ds_ref.value] = vmops.DcInfo(
+ ref=dc_ref,
+ name='fake-name',
+ vmFolder='fake-folder')
+ path = ds_util.DatastorePath(ds_name, base_name)
+ return ds_name, ds_ref, ops, path, dc_ref
+
+ @mock.patch.object(ds_util, 'mkdir')
+ def test_create_folder_if_missing(self, mock_mkdir):
+ ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
+ ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
+ mock_mkdir.assert_called_with(ops._session, path, dc)
+
+ @mock.patch.object(ds_util, 'mkdir')
+ def test_create_folder_if_missing_exception(self, mock_mkdir):
+ ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
+ ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
+ ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
+ mock_mkdir.assert_called_with(ops._session, path, dc)
+
+ @mock.patch.object(ds_util, 'file_exists', return_value=True)
+ def test_check_if_folder_file_exists_with_existing(self,
+ mock_exists):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ ops._create_folder_if_missing = mock.Mock()
+ mock_ds_ref = mock.Mock()
+ ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
+ "folder", "some_file")
+ ops._create_folder_if_missing.assert_called_once_with('datastore',
+ mock_ds_ref,
+ 'vmware_base')
+
+ @mock.patch.object(ds_util, 'file_exists', return_value=False)
+ def test_check_if_folder_file_exists_no_existing(self, mock_exists):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ ops._create_folder_if_missing = mock.Mock()
+ mock_ds_ref = mock.Mock()
+ ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
+ "folder", "some_file")
+ ops._create_folder_if_missing.assert_called_once_with('datastore',
+ mock_ds_ref,
+ 'vmware_base')
+
+ def test_get_valid_vms_from_retrieve_result(self):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ fake_objects = vmwareapi_fake.FakeRetrieveResult()
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
+ self.assertEqual(3, len(vms))
+
+ def test_get_valid_vms_from_retrieve_result_with_invalid(self):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ fake_objects = vmwareapi_fake.FakeRetrieveResult()
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ invalid_vm1 = vmwareapi_fake.VirtualMachine()
+ invalid_vm1.set('runtime.connectionState', 'orphaned')
+ invalid_vm2 = vmwareapi_fake.VirtualMachine()
+ invalid_vm2.set('runtime.connectionState', 'inaccessible')
+ fake_objects.add_object(invalid_vm1)
+ fake_objects.add_object(invalid_vm2)
+ vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
+ self.assertEqual(1, len(vms))
+
+ def test_delete_vm_snapshot(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('RemoveSnapshot_Task', method)
+ self.assertEqual('fake_vm_snapshot', args[0])
+ self.assertFalse(kwargs['removeChildren'])
+ self.assertTrue(kwargs['consolidate'])
+ return 'fake_remove_snapshot_task'
+
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method', fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ self._vmops._delete_vm_snapshot(self._instance,
+ "fake_vm_ref", "fake_vm_snapshot")
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_remove_snapshot_task')])
+
+ def test_create_vm_snapshot(self):
+
+ method_list = ['CreateSnapshot_Task', 'get_dynamic_property']
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_method = method_list.pop(0)
+ self.assertEqual(expected_method, method)
+ if (expected_method == 'CreateSnapshot_Task'):
+ self.assertEqual('fake_vm_ref', args[0])
+ self.assertFalse(kwargs['memory'])
+ self.assertTrue(kwargs['quiesce'])
+ return 'fake_snapshot_task'
+ elif (expected_method == 'get_dynamic_property'):
+ task_info = mock.Mock()
+ task_info.result = "fake_snapshot_ref"
+ self.assertEqual(('fake_snapshot_task', 'Task', 'info'), args)
+ return task_info
+
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method', fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ snap = self._vmops._create_vm_snapshot(self._instance,
+ "fake_vm_ref")
+ self.assertEqual("fake_snapshot_ref", snap)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_snapshot_task')])
+
+ def test_update_instance_progress(self):
+ instance = objects.Instance(context=mock.MagicMock(), uuid='fake-uuid')
+ with mock.patch.object(instance, 'save') as mock_save:
+ self._vmops._update_instance_progress(instance._context,
+ instance, 5, 10)
+ mock_save.assert_called_once_with()
+ self.assertEqual(50, instance.progress)
+
+ @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+ @mock.patch.object(driver.VMwareAPISession, '_call_method')
+ def test_get_info(self, mock_call, mock_get_vm_ref):
+ props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
+ 'runtime.powerState']
+ prop_cpu = vmwareapi_fake.Prop(props[0], 4)
+ prop_mem = vmwareapi_fake.Prop(props[1], 128)
+ prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
+ prop_list = [prop_state, prop_mem, prop_cpu]
+ obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
+ result = vmwareapi_fake.FakeRetrieveResult()
+ result.add_object(obj_content)
+ mock_call.return_value = result
+ info = self._vmops.get_info(self._instance)
+ mock_call.assert_called_once_with(vim_util,
+ 'get_object_properties', None, 'fake_ref', 'VirtualMachine',
+ props)
+ mock_get_vm_ref.assert_called_once_with(self._session,
+ self._instance)
+ self.assertEqual(power_state.RUNNING, info['state'])
+ self.assertEqual(128 * 1024, info['max_mem'])
+ self.assertEqual(128 * 1024, info['mem'])
+ self.assertEqual(4, info['num_cpu'])
+ self.assertEqual(0, info['cpu_time'])
+
+ @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+ @mock.patch.object(driver.VMwareAPISession, '_call_method')
+ def test_get_info_when_ds_unavailable(self, mock_call, mock_get_vm_ref):
+ props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
+ 'runtime.powerState']
+ prop_state = vmwareapi_fake.Prop(props[2], 'poweredOff')
+ # when vm's ds not available, only power state can be received
+ prop_list = [prop_state]
+ obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
+ result = vmwareapi_fake.FakeRetrieveResult()
+ result.add_object(obj_content)
+ mock_call.return_value = result
+ info = self._vmops.get_info(self._instance)
+ mock_call.assert_called_once_with(vim_util,
+ 'get_object_properties', None, 'fake_ref', 'VirtualMachine',
+ props)
+ mock_get_vm_ref.assert_called_once_with(self._session,
+ self._instance)
+ self.assertEqual(power_state.SHUTDOWN, info['state'])
+ self.assertEqual(0, info['max_mem'])
+ self.assertEqual(0, info['mem'])
+ self.assertEqual(0, info['num_cpu'])
+ self.assertEqual(0, info['cpu_time'])
+
+ def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
+ instance_ds_ref = mock.Mock()
+ instance_ds_ref.value = "ds-1"
+ _vcvmops = vmops.VMwareVMOps(self._session, None, None)
+ if ds_ref_exists:
+ ds_ref = mock.Mock()
+ ds_ref.value = "ds-1"
+ else:
+ ds_ref = None
+
+ def fake_call_method(module, method, *args, **kwargs):
+ fake_object1 = vmwareapi_fake.FakeRetrieveResult()
+ fake_object1.add_object(vmwareapi_fake.Datacenter(
+ ds_ref=ds_ref))
+ if not ds_ref:
+ # Token is set for the fake_object1, so it will continue to
+ # fetch the next object.
+ setattr(fake_object1, 'token', 'token-0')
+ if method == "continue_to_get_objects":
+ fake_object2 = vmwareapi_fake.FakeRetrieveResult()
+ fake_object2.add_object(vmwareapi_fake.Datacenter())
+ return fake_object2
+
+ return fake_object1
+
+ with mock.patch.object(self._session, '_call_method',
+ side_effect=fake_call_method) as fake_call:
+ dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
+
+ if ds_ref:
+ self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
+ fake_call.assert_called_once_with(vim_util, "get_objects",
+ "Datacenter", ["name", "datastore", "vmFolder"])
+ self.assertEqual("ha-datacenter", dc_info.name)
+ else:
+ calls = [mock.call(vim_util, "get_objects", "Datacenter",
+ ["name", "datastore", "vmFolder"]),
+ mock.call(vim_util, "continue_to_get_objects",
+ "token-0")]
+ fake_call.assert_has_calls(calls)
+ self.assertIsNone(dc_info)
+
+ def test_get_datacenter_ref_and_name(self):
+ self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
+
+ def test_get_datacenter_ref_and_name_with_no_datastore(self):
+ self._test_get_datacenter_ref_and_name()
+
+ def test_unrescue_power_on(self):
+ self._test_unrescue(True)
+
+ def test_unrescue_power_off(self):
+ self._test_unrescue(False)
+
+ def _test_unrescue(self, power_on):
+ self._vmops._volumeops = mock.Mock()
+ vm_rescue_ref = mock.Mock()
+ vm_ref = mock.Mock()
+
+ args_list = [(vm_ref, 'VirtualMachine',
+ 'config.hardware.device'),
+ (vm_rescue_ref, 'VirtualMachine',
+ 'config.hardware.device')]
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_args = args_list.pop(0)
+ self.assertEqual('get_dynamic_property', method)
+ self.assertEqual(expected_args, args)
+
+ path = mock.Mock()
+ path_and_type = (path, mock.Mock(), mock.Mock())
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_vmdk_path_and_adapter_type',
+ return_value=path_and_type),
+ mock.patch.object(vm_util, 'get_vmdk_volume_disk'),
+ mock.patch.object(vm_util, 'power_on_instance'),
+ mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
+ mock.patch.object(vm_util, 'get_vm_ref_from_name',
+ return_value=vm_rescue_ref),
+ mock.patch.object(self._session, '_call_method',
+ fake_call_method),
+ mock.patch.object(vm_util, 'power_off_instance'),
+ mock.patch.object(self._vmops, '_destroy_instance'),
+ ) as (_get_vmdk_path_and_adapter_type, _get_vmdk_volume_disk,
+ _power_on_instance, _get_vm_ref, _get_vm_ref_from_name,
+ _call_method, _power_off, _destroy_instance):
+ self._vmops.unrescue(self._instance, power_on=power_on)
+
+ _get_vmdk_path_and_adapter_type.assert_called_once_with(
+ None, uuid='fake_uuid')
+ _get_vmdk_volume_disk.assert_called_once_with(None, path=path)
+ if power_on:
+ _power_on_instance.assert_called_once_with(self._session,
+ self._instance,
+ vm_ref=vm_ref)
+ else:
+ self.assertFalse(_power_on_instance.called)
+ _get_vm_ref.assert_called_once_with(self._session,
+ self._instance)
+ _get_vm_ref_from_name.assert_called_once_with(self._session,
+ 'fake_uuid-rescue')
+ _power_off.assert_called_once_with(self._session, self._instance,
+ vm_rescue_ref)
+ _destroy_instance.assert_called_once_with(self._instance,
+ instance_name='fake_uuid-rescue')
+
+ def _test_finish_migration(self, power_on=True, resize_instance=False):
+ """Tests the finish_migration method on vmops."""
+ if resize_instance:
+ self._instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
+ with contextlib.nested(
+ mock.patch.object(self._session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(self._vmops, "_update_instance_progress"),
+ mock.patch.object(self._session, "_wait_for_task"),
+ mock.patch.object(vm_util, "get_vm_resize_spec",
+ return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self._vmops, '_extend_virtual_disk'),
+ mock.patch.object(vm_util, "power_on_instance")
+ ) as (fake_call_method, fake_update_instance_progress,
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
+ self._vmops.finish_migration(context=self._context,
+ migration=None,
+ instance=self._instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+ if resize_instance:
+ fake_vm_resize_spec.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance)
+ fake_call_method.assert_has_calls(mock.call(
+ self._session.vim,
+ "ReconfigVM_Task",
+ 'f',
+ spec='fake-spec'))
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self._instance, self._instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
+ else:
+ self.assertFalse(fake_vm_resize_spec.called)
+ self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
+
+ if power_on:
+ fake_power_on.assert_called_once_with(self._session,
+ self._instance,
+ vm_ref='f')
+ else:
+ self.assertFalse(fake_power_on.called)
+ fake_update_instance_progress.called_once_with(
+ self._context, self._instance, 4, vmops.RESIZE_TOTAL_STEPS)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(power_on=True, resize_instance=False)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(power_on=False, resize_instance=False)
+
+ def test_finish_migration_power_on_resize(self):
+ self._test_finish_migration(power_on=True, resize_instance=True)
+
+ @mock.patch.object(vm_util, 'associate_vmref_for_instance')
+ @mock.patch.object(vm_util, 'power_on_instance')
+ def _test_finish_revert_migration(self, fake_power_on,
+ fake_associate_vmref, power_on):
+ """Tests the finish_revert_migration method on vmops."""
+
+ # setup the test instance in the database
+ self._vmops.finish_revert_migration(self._context,
+ instance=self._instance,
+ network_info=None,
+ block_device_info=None,
+ power_on=power_on)
+ fake_associate_vmref.assert_called_once_with(self._session,
+ self._instance,
+ suffix='-orig')
+ if power_on:
+ fake_power_on.assert_called_once_with(self._session,
+ self._instance)
+ else:
+ self.assertFalse(fake_power_on.called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(power_on=True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(power_on=False)
+
+ @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
+ def test_configure_config_drive(self,
+ mock_create_config_drive,
+ mock_attach_cdrom_to_vm):
+ injected_files = mock.Mock()
+ admin_password = mock.Mock()
+ vm_ref = mock.Mock()
+ mock_create_config_drive.return_value = "fake_iso_path"
+ self._vmops._configure_config_drive(
+ self._instance, vm_ref, self._dc_info, self._ds,
+ injected_files, admin_password)
+
+ upload_iso_path = self._ds.build_path("fake_iso_path")
+ mock_create_config_drive.assert_called_once_with(self._instance,
+ injected_files, admin_password, self._ds.name,
+ self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
+ mock_attach_cdrom_to_vm.assert_called_once_with(
+ vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
+
+ @mock.patch.object(vmops.LOG, 'debug')
+ @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
+ @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
+ def test_spawn_mask_block_device_info_password(self,
+ mock_build_virtual_machine,
+ mock_get_vm_config_info,
+ mock_debug):
+ # Very simple test that just ensures block_device_info auth_password
+ # is masked when logged; the rest of the test just fails out early.
+ data = {'auth_password': 'scrubme'}
+ bdm = [{'connection_info': {'data': data}}]
+ bdi = {'block_device_mapping': bdm}
+
+ self.password_logged = False
+
+ # Tests that the parameters to the to_xml method are sanitized for
+ # passwords when logged.
+ def fake_debug(*args, **kwargs):
+ if 'auth_password' in args[0]:
+ self.password_logged = True
+ self.assertNotIn('scrubme', args[0])
+
+ mock_debug.side_effect = fake_debug
+ self.flags(flat_injected=False, vnc_enabled=False)
+
+ # Call spawn(). We don't care what it does as long as it generates
+ # the log message, which we check below.
+ with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
+ mock_vo.attach_root_volume.side_effect = test.TestingException
+ try:
+ self._vmops.spawn(
+ self._context, self._instance, {},
+ injected_files=None, admin_password=None,
+ network_info=[], block_device_info=bdi
+ )
+ except test.TestingException:
+ pass
+
+ # Check that the relevant log message was generated, and therefore
+ # that we checked it was scrubbed
+ self.assertTrue(self.password_logged)
+
+ def test_get_ds_browser(self):
+ cache = self._vmops._datastore_browser_mapping
+ ds_browser = mock.Mock()
+ moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
+ self.assertIsNone(cache.get(moref.value))
+ mock_call_method = mock.Mock(return_value=ds_browser)
+ with mock.patch.object(self._session, '_call_method',
+ mock_call_method):
+ ret = self._vmops._get_ds_browser(moref)
+ mock_call_method.assert_called_once_with(vim_util,
+ 'get_dynamic_property', moref, 'Datastore', 'browser')
+ self.assertIs(ds_browser, ret)
+ self.assertIs(ds_browser, cache.get(moref.value))
+
+ @mock.patch.object(
+ vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
+ @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ def _test_use_disk_image_as_linked_clone(self,
+ mock_copy_virtual_disk,
+ mock_extend_virtual_disk,
+ mock_sized_image_exists,
+ flavor_fits_image=False):
+ file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=file_size,
+ linked_clone=False)
+
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+
+ sized_cached_image_ds_loc = cache_root_folder.join(
+ "%s.%s.vmdk" % (self._image_id, vi.root_gb))
+
+ self._vmops._volumeops = mock.Mock()
+ mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+ self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
+
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ str(vi.cache_image_path),
+ str(sized_cached_image_ds_loc))
+
+ if not flavor_fits_image:
+ mock_extend_virtual_disk.assert_called_once_with(
+ self._instance, vi.root_gb * units.Mi,
+ str(sized_cached_image_ds_loc),
+ self._dc_info.ref)
+
+ mock_attach_disk_to_vm.assert_called_once_with(
+ "fake_vm_ref", self._instance, vi.ii.adapter_type,
+ vi.ii.disk_type,
+ str(sized_cached_image_ds_loc),
+ vi.root_gb * units.Mi, False)
+
+ def test_use_disk_image_as_linked_clone(self):
+ self._test_use_disk_image_as_linked_clone()
+
+ def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
+ self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
+
+ @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ def _test_use_disk_image_as_full_clone(self,
+ mock_copy_virtual_disk,
+ mock_extend_virtual_disk,
+ flavor_fits_image=False):
+ file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=file_size,
+ linked_clone=False)
+
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+
+ self._vmops._volumeops = mock.Mock()
+ mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+ self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
+
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ str(vi.cache_image_path),
+ '[fake_ds] fake_uuid/fake_uuid.vmdk')
+
+ if not flavor_fits_image:
+ mock_extend_virtual_disk.assert_called_once_with(
+ self._instance, vi.root_gb * units.Mi,
+ '[fake_ds] fake_uuid/fake_uuid.vmdk', self._dc_info.ref)
+
+ mock_attach_disk_to_vm.assert_called_once_with(
+ "fake_vm_ref", self._instance, vi.ii.adapter_type,
+ vi.ii.disk_type, '[fake_ds] fake_uuid/fake_uuid.vmdk',
+ vi.root_gb * units.Mi, False)
+
+ def test_use_disk_image_as_full_clone(self):
+ self._test_use_disk_image_as_full_clone()
+
+ def test_use_disk_image_as_full_clone_image_too_big(self):
+ self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
+
+ @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ @mock.patch.object(vm_util, 'create_virtual_disk')
+ def _test_use_iso_image(self,
+ mock_create_virtual_disk,
+ mock_attach_cdrom,
+ with_root_disk):
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=10 * units.Mi,
+ linked_clone=True)
+
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+
+ self._vmops._volumeops = mock.Mock()
+ mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+ self._vmops._use_iso_image("fake_vm_ref", vi)
+
+ mock_attach_cdrom.assert_called_once_with(
+ "fake_vm_ref", self._instance, self._ds.ref,
+ str(vi.cache_image_path))
+
+ if with_root_disk:
+ mock_create_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ vi.ii.adapter_type, vi.ii.disk_type,
+ '[fake_ds] fake_uuid/fake_uuid.vmdk',
+ vi.root_gb * units.Mi)
+ linked_clone = False
+ mock_attach_disk_to_vm.assert_called_once_with(
+ "fake_vm_ref", self._instance,
+ vi.ii.adapter_type, vi.ii.disk_type,
+ '[fake_ds] fake_uuid/fake_uuid.vmdk',
+ vi.root_gb * units.Mi, linked_clone)
+
+ def test_use_iso_image_with_root_disk(self):
+ self._test_use_iso_image(with_root_disk=True)
+
+ def test_use_iso_image_without_root_disk(self):
+ self._test_use_iso_image(with_root_disk=False)
+
+ def _verify_spawn_method_calls(self, mock_call_method):
+ # TODO(vui): More explicit assertions of spawn() behavior
+ # are waiting on additional refactoring pertaining to image
+ # handling/manipulation. Till then, we continue to assert on the
+ # sequence of VIM operations invoked.
+ expected_methods = ['get_dynamic_property',
+ 'SearchDatastore_Task',
+ 'CreateVirtualDisk_Task',
+ 'DeleteDatastoreFile_Task',
+ 'MoveDatastoreFile_Task',
+ 'DeleteDatastoreFile_Task',
+ 'SearchDatastore_Task',
+ 'ExtendVirtualDisk_Task',
+ ]
+
+ recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
+ self.assertEqual(expected_methods, recorded_methods)
+
+ @mock.patch(
+ 'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
+ @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
+ @mock.patch(
+ 'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
+ @mock.patch('nova.virt.vmwareapi.vm_util.get_mo_id_from_instance',
+ return_value='fake_node_mo_id')
+ @mock.patch('nova.virt.vmwareapi.vm_util.get_res_pool_ref',
+ return_value='fake_rp_ref')
+ @mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
+ return_value=[])
+ @mock.patch('nova.utils.is_neutron',
+ return_value=False)
+ @mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
+ return_value='fake_create_spec')
+ @mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
+ return_value='fake_vm_ref')
+ @mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
+ @mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
+ @mock.patch(
+ 'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
+ @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
+ @mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
+ # TODO(dims): Need to add tests for create_virtual_disk after the
+ # disk/image code in spawn gets refactored
+ def _test_spawn(self,
+ mock_copy_virtual_disk,
+ mock_power_on_instance,
+ mock_get_and_set_vnc_config,
+ mock_enlist_image,
+ mock_set_machine_id,
+ mock_mkdir,
+ mock_create_vm,
+ mock_get_create_spec,
+ mock_is_neutron,
+ mock_get_vif_info,
+ mock_get_res_pool_ref,
+ mock_get_mo_id_for_instance,
+ mock_get_datacenter_ref_and_name,
+ mock_get_datastore,
+ mock_configure_config_drive,
+ block_device_info=None,
+ power_on=True,
+ allocations=None,
+ config_drive=False):
+
+ self._vmops._volumeops = mock.Mock()
+ image = {
+ 'id': 'fake-image-d',
+ 'disk_format': 'vmdk',
+ 'size': 1 * units.Gi,
+ }
+ network_info = mock.Mock()
+ mock_get_datastore.return_value = self._ds
+ mock_get_datacenter_ref_and_name.return_value = self._dc_info
+ mock_call_method = mock.Mock(return_value='fake_task')
+
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method',
+ mock_call_method),
+ mock.patch.object(uuidutils, 'generate_uuid',
+ return_value='tmp-uuid'),
+ mock.patch.object(images, 'fetch_image')
+ ) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image):
+ self._vmops.spawn(self._context, self._instance, image,
+ injected_files='fake_files',
+ admin_password='password',
+ network_info=network_info,
+ block_device_info=block_device_info,
+ power_on=power_on)
+
+ mock_is_neutron.assert_called_once_with()
+
+ expected_mkdir_calls = 2
+ if block_device_info and len(block_device_info.get(
+ 'block_device_mapping', [])) > 0:
+ # if block_device_info contains key 'block_device_mapping'
+ # with any information, method mkdir wouldn't be called in
+ # method self._vmops.spawn()
+ expected_mkdir_calls = 0
+
+ self.assertEqual(expected_mkdir_calls, len(mock_mkdir.mock_calls))
+
+ mock_get_mo_id_for_instance.assert_called_once_with(self._instance)
+ mock_get_res_pool_ref.assert_called_once_with(
+ self._session, None, 'fake_node_mo_id')
+ mock_get_vif_info.assert_called_once_with(
+ self._session, None, False,
+ constants.DEFAULT_VIF_MODEL, network_info)
+ if allocations is None:
+ allocations = {}
+ mock_get_create_spec.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance,
+ 'fake_uuid',
+ 'fake_ds',
+ [],
+ 'otherGuest',
+ allocations=allocations)
+ mock_create_vm.assert_called_once_with(
+ self._session,
+ self._instance,
+ 'fake_vm_folder',
+ 'fake_create_spec',
+ 'fake_rp_ref')
+ mock_get_and_set_vnc_config.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance)
+ mock_set_machine_id.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance,
+ network_info)
+ if power_on:
+ mock_power_on_instance.assert_called_once_with(
+ self._session, self._instance, vm_ref='fake_vm_ref')
+ else:
+ self.assertFalse(mock_power_on_instance.called)
+
+ if block_device_info:
+ root_disk = block_device_info['block_device_mapping'][0]
+ mock_attach = self._vmops._volumeops.attach_root_volume
+ mock_attach.assert_called_once_with(
+ root_disk['connection_info'], self._instance, 'vda',
+ self._ds.ref)
+ self.assertFalse(_wait_for_task.called)
+ self.assertFalse(_fetch_image.called)
+ self.assertFalse(_call_method.called)
+ else:
+ mock_enlist_image.assert_called_once_with(
+ self._image_id, self._ds, self._dc_info.ref)
+
+ upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
+ self._image_id, self._image_id)
+ _fetch_image.assert_called_once_with(
+ self._context,
+ self._instance,
+ self._session._host,
+ self._dc_info.name,
+ self._ds.name,
+ upload_file_name,
+ cookies='Fake-CookieJar')
+ self.assertTrue(len(_wait_for_task.mock_calls) > 0)
+ self._verify_spawn_method_calls(_call_method)
+
+ dc_ref = 'fake_dc_ref'
+ source_file = unicode('[fake_ds] vmware_base/%s/%s.vmdk' %
+ (self._image_id, self._image_id))
+ dest_file = unicode('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
+ (self._image_id, self._image_id,
+ self._instance['root_gb']))
+ # TODO(dims): add more tests for copy_virtual_disk after
+ # the disk/image code in spawn gets refactored
+ mock_copy_virtual_disk.assert_called_with(self._session,
+ dc_ref,
+ source_file,
+ dest_file)
+ if config_drive:
+ mock_configure_config_drive.assert_called_once_with(
+ self._instance, 'fake_vm_ref', self._dc_info,
+ self._ds, 'fake_files', 'password')
+
+ @mock.patch.object(ds_util, 'get_datastore')
+ @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
+ def _test_get_spawn_vm_config_info(self,
+ mock_get_datacenter_ref_and_name,
+ mock_get_datastore,
+ image_size_bytes=0,
+ instance_name=None):
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=image_size_bytes,
+ linked_clone=True)
+
+ mock_get_datastore.return_value = self._ds
+ mock_get_datacenter_ref_and_name.return_value = self._dc_info
+
+ vi = self._vmops._get_vm_config_info(
+ self._instance, image_info, instance_name=instance_name)
+ self.assertEqual(image_info, vi.ii)
+ self.assertEqual(self._ds, vi.datastore)
+ self.assertEqual(self._instance.root_gb, vi.root_gb)
+ self.assertEqual(self._instance, vi.instance)
+ if instance_name is not None:
+ self.assertEqual(instance_name, vi.instance_name)
+ else:
+ self.assertEqual(self._instance.uuid, vi.instance_name)
+
+ cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+ self.assertEqual(cache_image_path, str(vi.cache_image_path))
+
+ cache_image_folder = '[%s] vmware_base/%s' % (
+ self._ds.name, self._image_id)
+ self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
+
+ def test_get_spawn_vm_config_info(self):
+ image_size = (self._instance.root_gb) * units.Gi / 2
+ self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
+
+ def test_get_spawn_vm_config_info_image_too_big(self):
+ image_size = (self._instance.root_gb + 1) * units.Gi
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._test_get_spawn_vm_config_info,
+ image_size_bytes=image_size)
+
+ def test_get_spawn_vm_config_info_with_instance_name(self):
+ image_size = (self._instance.root_gb) * units.Gi / 2
+ self._test_get_spawn_vm_config_info(
+ image_size_bytes=image_size,
+ instance_name="foo_instance_name")
+
+ def test_spawn(self):
+ self._test_spawn()
+
+ def test_spawn_config_drive_enabled(self):
+ self.flags(force_config_drive=True)
+ self._test_spawn(config_drive=True)
+
+ def test_spawn_no_power_on(self):
+ self._test_spawn(power_on=False)
+
+ def test_spawn_with_block_device_info(self):
+ block_device_info = {
+ 'block_device_mapping': [{'connection_info': 'fake'}]
+ }
+ self._test_spawn(block_device_info=block_device_info)
+
+ def test_spawn_with_block_device_info_with_config_drive(self):
+ self.flags(force_config_drive=True)
+ block_device_info = {
+ 'block_device_mapping': [{'connection_info': 'fake'}]
+ }
+ self._test_spawn(block_device_info=block_device_info,
+ config_drive=True)
+
+ def test_build_virtual_machine(self):
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ image = images.VMwareImage(image_id=image_id)
+
+ vm_ref = self._vmops.build_virtual_machine(self._instance,
+ 'fake-instance-name',
+ image, self._dc_info,
+ self._ds, self.network_info)
+
+ vm = vmwareapi_fake._get_object(vm_ref)
+
+ # Test basic VM parameters
+ self.assertEqual('fake-instance-name', vm.name)
+ # NOTE(mdbooth): The instanceUuid behaviour below is apparently
+ # deliberate.
+ self.assertEqual('fake-instance-name',
+ vm.get('summary.config.instanceUuid'))
+ self.assertEqual(self._instance_values['vcpus'],
+ vm.get('summary.config.numCpu'))
+ self.assertEqual(self._instance_values['memory_mb'],
+ vm.get('summary.config.memorySizeMB'))
+
+ # Test NSX config
+ for optval in vm.get('config.extraConfig').OptionValue:
+ if optval.key == 'nvp.vm-uuid':
+ self.assertEqual(self._instance_values['uuid'], optval.value)
+ break
+ else:
+ self.fail('nvp.vm-uuid not found in extraConfig')
+
+ # Test that the VM is associated with the specified datastore
+ datastores = vm.datastore.ManagedObjectReference
+ self.assertEqual(1, len(datastores))
+
+ datastore = vmwareapi_fake._get_object(datastores[0])
+ self.assertEqual(self._ds.name, datastore.get('summary.name'))
+
+ # Test that the VM's network is configured as specified
+ devices = vm.get('config.hardware.device').VirtualDevice
+ for device in devices:
+ if device.obj_name != 'ns0:VirtualE1000':
+ continue
+ self.assertEqual(self._network_values['address'],
+ device.macAddress)
+ break
+ else:
+ self.fail('NIC not configured')
+
+ def test_spawn_cpu_limit(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_limit': 7})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_limit': 7})
+
+ def test_spawn_cpu_reservation(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_reservation': 7})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_reservation': 7})
+
+ def test_spawn_cpu_allocations(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_limit': 7,
+ 'quota:cpu_reservation': 6})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_limit': 7,
+ 'cpu_reservation': 6})
+
+ def test_spawn_cpu_shares_level(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_shares_level': 'high'})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_shares_level': 'high'})
+
+ def test_spawn_cpu_shares_custom(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_shares_level': 'custom',
+ 'quota:cpu_shares_share': 1948})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_shares_level': 'custom',
+ 'cpu_shares_share': 1948})
+
+ def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False):
+ disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
+ else constants.DEFAULT_DISK_TYPE)
+ file_type = (constants.DISK_FORMAT_ISO if is_iso
+ else constants.DEFAULT_DISK_FORMAT)
+
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=10 * units.Mi,
+ file_type=file_type,
+ disk_type=disk_type,
+ linked_clone=True)
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+ return vi
+
+ @mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
+ @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ def _test_fetch_image_if_missing(self,
+ mock_delete_datastore_file,
+ mock_cache_flat_image,
+ mock_cache_sparse_image,
+ mock_cache_iso_image,
+ mock_prepare_flat_image,
+ mock_prepare_sparse_image,
+ mock_prepare_iso_image,
+ mock_fetch_image_as_file,
+ mock_check_cache_folder,
+ is_iso=False,
+ is_sparse_disk=False):
+
+ tmp_dir_path = mock.Mock()
+ tmp_image_path = mock.Mock()
+ if is_iso:
+ mock_prepare = mock_prepare_iso_image
+ mock_cache = mock_cache_iso_image
+ elif is_sparse_disk:
+ mock_prepare = mock_prepare_sparse_image
+ mock_cache = mock_cache_sparse_image
+ else:
+ mock_prepare = mock_prepare_flat_image
+ mock_cache = mock_cache_flat_image
+ mock_prepare.return_value = tmp_dir_path, tmp_image_path
+
+ vi = self._make_vm_config_info(is_iso, is_sparse_disk)
+ self._vmops._fetch_image_if_missing(self._context, vi)
+
+ mock_check_cache_folder.assert_called_once_with(
+ self._ds.name, self._ds.ref)
+ mock_prepare.assert_called_once_with(vi)
+ mock_fetch_image_as_file.assert_called_once_with(
+ self._context, vi, tmp_image_path)
+ mock_cache.assert_called_once_with(vi, tmp_image_path)
+ mock_delete_datastore_file.assert_called_once_with(
+ str(tmp_dir_path), self._dc_info.ref)
+
+ def test_fetch_image_if_missing(self):
+ self._test_fetch_image_if_missing()
+
+ def test_fetch_image_if_missing_with_sparse(self):
+ self._test_fetch_image_if_missing(
+ is_sparse_disk=True)
+
+ def test_fetch_image_if_missing_with_iso(self):
+ self._test_fetch_image_if_missing(
+ is_iso=True)
+
+ @mock.patch.object(images, 'fetch_image')
+ def test_fetch_image_as_file(self, mock_fetch_image):
+ vi = self._make_vm_config_info()
+ image_ds_loc = mock.Mock()
+ self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
+ mock_fetch_image.assert_called_once_with(
+ self._context,
+ vi.instance,
+ self._session._host,
+ self._dc_info.name,
+ self._ds.name,
+ image_ds_loc.rel_path,
+ cookies='Fake-CookieJar')
+
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_iso_image(self, mock_generate_uuid):
+ vi = self._make_vm_config_info(is_iso=True)
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
+ self._ds.name, self._image_id, self._image_id)
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_sparse_image(self, mock_generate_uuid):
+ vi = self._make_vm_config_info(is_sparse_disk=True)
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
+ self._ds.name, self._image_id, "tmp-sparse.vmdk")
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ @mock.patch.object(ds_util, 'mkdir')
+ @mock.patch.object(vm_util, 'create_virtual_disk')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_flat_image(self,
+ mock_generate_uuid,
+ mock_delete_datastore_file,
+ mock_create_virtual_disk,
+ mock_mkdir):
+ vi = self._make_vm_config_info()
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+ expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
+ self._ds.name, self._image_id)
+ expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+
+ mock_mkdir.assert_called_once_with(
+ self._session, DsPathMatcher(expected_image_path_parent),
+ self._dc_info.ref)
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ image_info = vi.ii
+ mock_create_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ image_info.adapter_type,
+ image_info.disk_type,
+ DsPathMatcher(expected_path_to_create),
+ image_info.file_size_in_kb)
+ mock_delete_datastore_file.assert_called_once_with(
+ DsPathMatcher(expected_image_path),
+ self._dc_info.ref)
+
+ @mock.patch.object(ds_util, 'file_move')
+ def test_cache_iso_image(self, mock_file_move):
+ vi = self._make_vm_config_info(is_iso=True)
+ tmp_image_ds_loc = mock.Mock()
+
+ self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
+
+ mock_file_move.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ tmp_image_ds_loc.parent,
+ DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
+
+ @mock.patch.object(ds_util, 'file_move')
+ def test_cache_flat_image(self, mock_file_move):
+ vi = self._make_vm_config_info()
+ tmp_image_ds_loc = mock.Mock()
+
+ self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
+
+ mock_file_move.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ tmp_image_ds_loc.parent,
+ DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
+
+ @mock.patch.object(ds_util, 'file_move')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ def test_cache_sparse_image(self,
+ mock_delete_datastore_file,
+ mock_copy_virtual_disk,
+ mock_file_move):
+ vi = self._make_vm_config_info(is_sparse_disk=True)
+
+ sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
+ self._ds.name, self._image_id)
+ tmp_image_ds_loc = ds_util.DatastorePath.parse(sparse_disk_path)
+
+ self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
+
+ target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
+ self._ds.name,
+ self._image_id, self._image_id)
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ sparse_disk_path,
+ DsPathMatcher(target_disk_path))
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
new file mode 100644
index 0000000000..8dc6b500cb
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -0,0 +1,95 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import volumeops
+
+
+class VMwareVolumeOpsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+
+ super(VMwareVolumeOpsTestCase, self).setUp()
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ self._session = driver.VMwareAPISession()
+
+ self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self.instance = {'name': 'fake_name', 'uuid': 'fake_uuid'}
+
+ def _test_detach_disk_from_vm(self, destroy_disk=False):
+ def fake_call_method(module, method, *args, **kwargs):
+ vmdk_detach_config_spec = kwargs.get('spec')
+ virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
+ self.assertEqual('remove', virtual_device_config.operation)
+ self.assertEqual('ns0:VirtualDeviceConfigSpec',
+ virtual_device_config.obj_name)
+ if destroy_disk:
+ self.assertEqual('destroy',
+ virtual_device_config.fileOperation)
+ else:
+ self.assertFalse(hasattr(virtual_device_config,
+ 'fileOperation'))
+ return 'fake_configure_task'
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ fake_device = vmwareapi_fake.DataObject()
+ fake_device.backing = vmwareapi_fake.DataObject()
+ fake_device.backing.fileName = 'fake_path'
+ fake_device.key = 'fake_key'
+ self._volumeops.detach_disk_from_vm('fake_vm_ref', self.instance,
+ fake_device, destroy_disk)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_configure_task')])
+
+ def test_detach_with_destroy_disk_from_vm(self):
+ self._test_detach_disk_from_vm(destroy_disk=True)
+
+ def test_detach_without_destroy_disk_from_vm(self):
+ self._test_detach_disk_from_vm(destroy_disk=False)
+
+ def _fake_call_get_dynamic_property(self, uuid, result):
+ def fake_call_method(vim, method, vm_ref, type, prop):
+ expected_prop = 'config.extraConfig["volume-%s"]' % uuid
+ self.assertEqual('VirtualMachine', type)
+ self.assertEqual(expected_prop, prop)
+ return result
+ return fake_call_method
+
+ def test_get_volume_uuid(self):
+ vm_ref = mock.Mock()
+ uuid = '1234'
+ opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
+ fake_call = self._fake_call_get_dynamic_property(uuid, opt_val)
+ with mock.patch.object(self._session, "_call_method", fake_call):
+ val = self._volumeops._get_volume_uuid(vm_ref, uuid)
+ self.assertEqual('volume-val', val)
+
+ def test_get_volume_uuid_not_found(self):
+ vm_ref = mock.Mock()
+ uuid = '1234'
+ fake_call = self._fake_call_get_dynamic_property(uuid, None)
+ with mock.patch.object(self._session, "_call_method", fake_call):
+ val = self._volumeops._get_volume_uuid(vm_ref, uuid)
+ self.assertIsNone(val)
diff --git a/nova/tests/unit/virt/xenapi/__init__.py b/nova/tests/unit/virt/xenapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/__init__.py b/nova/tests/unit/virt/xenapi/client/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/test_objects.py b/nova/tests/unit/virt/xenapi/client/test_objects.py
new file mode 100644
index 0000000000..efaf17a9c7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_objects.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi.client import objects
+
+
+class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenAPISessionObjectTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.obj = objects.XenAPISessionObject(self.session, "FAKE")
+
+ def test_call_method_via_attr(self):
+ self.session.call_xenapi.return_value = "asdf"
+
+ result = self.obj.get_X("ref")
+
+ self.assertEqual(result, "asdf")
+ self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
+
+
+class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ObjectsTestCase, self).setUp()
+ self.session = mock.Mock()
+
+ def test_VM(self):
+ vm = objects.VM(self.session)
+ vm.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_SR(self):
+ sr = objects.SR(self.session)
+ sr.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_VDI(self):
+ vdi = objects.VDI(self.session)
+ vdi.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_VBD(self):
+ vbd = objects.VBD(self.session)
+ vbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_PBD(self):
+ pbd = objects.PBD(self.session)
+ pbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_PIF(self):
+ pif = objects.PIF(self.session)
+ pif.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_VLAN(self):
+ vlan = objects.VLAN(self.session)
+ vlan.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_host(self):
+ host = objects.Host(self.session)
+ host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_network(self):
+ network = objects.Network(self.session)
+ network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_pool(self):
+ pool = objects.Pool(self.session)
+ pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class VBDTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VBDTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.session.VBD = objects.VBD(self.session)
+
+ def test_plug(self):
+ self.session.VBD.plug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
+
+ def test_unplug(self):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.unplug",
+ "vbd_ref")
+
+ @mock.patch.object(utils, 'synchronized')
+ def test_vbd_plug_check_synchronized(self, mock_synchronized):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
diff --git a/nova/tests/unit/virt/xenapi/client/test_session.py b/nova/tests/unit/virt/xenapi/client/test_session.py
new file mode 100644
index 0000000000..1fbbbf752d
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_session.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import socket
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova import version
+from nova.virt.xenapi.client import session
+
+
class SessionTestCase(stubs.XenAPITestBaseNoDB):
    @mock.patch.object(session.XenAPISession, '_create_session')
    @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
    @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
    def test_session_passes_version(self, mock_verify, mock_version,
                                    create_session):
        """The login call must carry the full nova version string."""
        fake_session = mock.Mock()
        create_session.return_value = fake_session
        mock_version.return_value = ('version', 'brand')

        # version.* accessors are side-effect free, so the expectation can
        # be computed up front.
        expected_version = '%s %s %s' % (version.vendor_string(),
                                         version.product_string(),
                                         version.version_string_with_package())

        session.XenAPISession('url', 'username', 'password')

        fake_session.login_with_password.assert_called_with(
            'username', 'password', expected_version, 'OpenStack')
+
+
class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
    """apply_session_helpers must attach one proxy per xenapi class."""

    def setUp(self):
        super(ApplySessionHelpersTestCase, self).setUp()
        self.session = mock.Mock()
        session.apply_session_helpers(self.session)

    def _assert_helper_forwards(self, helper_name):
        # Call get_X through the attached helper and check it is routed to
        # call_xenapi as "<helper>.get_X" with the ref passed through.
        getattr(self.session, helper_name).get_X("ref")
        self.session.call_xenapi.assert_called_once_with(
            "%s.get_X" % helper_name, "ref")

    def test_apply_session_helpers_add_VM(self):
        self._assert_helper_forwards("VM")

    def test_apply_session_helpers_add_SR(self):
        self._assert_helper_forwards("SR")

    def test_apply_session_helpers_add_VDI(self):
        self._assert_helper_forwards("VDI")

    def test_apply_session_helpers_add_VBD(self):
        self._assert_helper_forwards("VBD")

    def test_apply_session_helpers_add_PBD(self):
        self._assert_helper_forwards("PBD")

    def test_apply_session_helpers_add_PIF(self):
        self._assert_helper_forwards("PIF")

    def test_apply_session_helpers_add_VLAN(self):
        self._assert_helper_forwards("VLAN")

    def test_apply_session_helpers_add_host(self):
        self._assert_helper_forwards("host")

    def test_apply_session_helpers_add_network(self):
        self._assert_helper_forwards("network")

    def test_apply_session_helpers_add_pool(self):
        self._assert_helper_forwards("pool")
+
+
class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for XenAPISession.call_plugin_serialized_with_retry."""

    def _get_fake_xapisession(self):
        class FakeXapiSession(session.XenAPISession):
            def __init__(self, **kwargs):
                "Skip the superclass's dirty init"
                self.XenAPI = mock.MagicMock()

        return FakeXapiSession()

    def setUp(self):
        super(CallPluginTestCase, self).setUp()
        self.session = self._get_fake_xapisession()

    def _make_socket_error(self, error_number):
        """Return a socket.error *instance* carrying ``error_number``.

        Bug fix: the previous version assigned ``errno`` onto the
        socket.error class itself (``socket.error.errno = ...``), which
        mutated the builtin exception type globally and leaked that state
        into every other test in the process.
        """
        return socket.error(error_number, 'fake socket error')

    def test_serialized_with_retry_socket_error_conn_reset(self):
        # ECONNRESET is retryable: with num_retries=1 there are two
        # attempts, the retry callback fires for each, and exhaustion
        # raises PluginRetriesExceeded.
        exc = self._make_socket_error(errno.ECONNRESET)
        plugin = 'glance'
        fn = 'download_vhd'
        num_retries = 1
        callback = None
        retry_cb = mock.Mock()
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               autospec=True) as call_plugin_serialized:
            call_plugin_serialized.side_effect = exc
            self.assertRaises(exception.PluginRetriesExceeded,
                self.session.call_plugin_serialized_with_retry, plugin, fn,
                num_retries, callback, retry_cb)
            call_plugin_serialized.assert_called_with(plugin, fn)
            self.assertEqual(2, call_plugin_serialized.call_count)
            self.assertEqual(2, retry_cb.call_count)

    def test_serialized_with_retry_socket_error_reraised(self):
        # A non-retryable errno (ECONNREFUSED) propagates immediately on
        # the first attempt without invoking the retry callback.
        exc = self._make_socket_error(errno.ECONNREFUSED)
        plugin = 'glance'
        fn = 'download_vhd'
        num_retries = 1
        callback = None
        retry_cb = mock.Mock()
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               autospec=True) as call_plugin_serialized:
            call_plugin_serialized.side_effect = exc
            self.assertRaises(socket.error,
                self.session.call_plugin_serialized_with_retry, plugin, fn,
                num_retries, callback, retry_cb)
            call_plugin_serialized.assert_called_once_with(plugin, fn)
            self.assertEqual(0, retry_cb.call_count)

    def test_serialized_with_retry_socket_reset_reraised(self):
        # Same as the conn_reset case but without a retry-callback count
        # assertion: retries are exhausted and PluginRetriesExceeded raised.
        exc = self._make_socket_error(errno.ECONNRESET)
        plugin = 'glance'
        fn = 'download_vhd'
        num_retries = 1
        callback = None
        retry_cb = mock.Mock()
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               autospec=True) as call_plugin_serialized:
            call_plugin_serialized.side_effect = exc
            self.assertRaises(exception.PluginRetriesExceeded,
                self.session.call_plugin_serialized_with_retry, plugin, fn,
                num_retries, callback, retry_cb)
            call_plugin_serialized.assert_called_with(plugin, fn)
            self.assertEqual(2, call_plugin_serialized.call_count)
diff --git a/nova/tests/unit/virt/xenapi/image/__init__.py b/nova/tests/unit/virt/xenapi/image/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/image/test_bittorrent.py b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
new file mode 100644
index 0000000000..5422036b98
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
@@ -0,0 +1,163 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import pkg_resources
+import six
+
+from nova import context
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import bittorrent
+from nova.virt.xenapi import vm_utils
+
+
class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
    """Tests for BittorrentStore download/upload via the xapi plugin.

    NOTE: mox-based; expectations must be recorded in the exact order
    they are consumed, before ReplayAll().
    """

    def setUp(self):
        super(TestBittorrentStore, self).setUp()
        self.store = bittorrent.BittorrentStore()
        self.mox = mox.Mox()

        # Torrent mirror plus a (stubbed-out) XenAPI endpoint.
        self.flags(torrent_base_url='http://foo',
                   connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')

        fake.reset()
        stubs.stubout_session(self.stubs, fake.SessionBase)

        # No torrent-URL fetcher extensions are installed for these tests.
        def mock_iter_eps(namespace):
            return []

        self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_eps)

        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session

        self.stubs.Set(
            vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')

    def test_download_image(self):
        """download_image forwards every torrent knob to the plugin call."""

        instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
        params = {'image_id': 'fake_image_uuid',
                  'sr_path': '/fake/sr/path',
                  'torrent_download_stall_cutoff': 600,
                  'torrent_listen_port_end': 6891,
                  'torrent_listen_port_start': 6881,
                  'torrent_max_last_accessed': 86400,
                  'torrent_max_seeder_processes_per_host': 1,
                  'torrent_seed_chance': 1.0,
                  'torrent_seed_duration': 3600,
                  'torrent_url': 'http://foo/fake_image_uuid.torrent',
                  'uuid_stack': ['uuid1']}

        self.stubs.Set(vm_utils, '_make_uuid_stack',
                       lambda *a, **kw: ['uuid1'])

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized(
            'bittorrent', 'download_vhd', **params)
        self.mox.ReplayAll()

        self.store.download_image(self.context, self.session,
                                  instance, 'fake_image_uuid')

        self.mox.VerifyAll()

    def test_upload_image(self):
        """The bittorrent store is download-only; upload is unimplemented."""
        # NOTE(review): mox.IgnoreArg is passed as the class, not as
        # mox.IgnoreArg(); upload_image raises NotImplementedError before
        # the argument is inspected, so this goes unnoticed — confirm intent.
        self.assertRaises(NotImplementedError, self.store.upload_image,
                          self.context, self.session, mox.IgnoreArg,
                          'fake_image_uuid', ['fake_vdi_uuid'])
+
+
def bad_fetcher(image_id):
    """Torrent-URL fetcher stand-in that always fails."""
    raise test.TestingException("just plain bad.")
+
+
def another_fetcher(image_id):
    """Torrent-URL fetcher stand-in returning a deterministic URL."""
    url = "http://www.foobar.com/%s" % image_id
    return url
+
+
class MockEntryPoint(object):
    """Mimics a pkg_resources entry point advertising a URL fetcher."""

    # Entry-point name the bittorrent store looks for.
    name = "torrent_url"

    def load(self):
        """Return the fetcher callable, as pkg_resources would."""
        return another_fetcher
+
+
class LookupTorrentURLTestCase(test.NoDBTestCase):
    """Resolution rules for BittorrentStore._lookup_torrent_url_fn."""

    def setUp(self):
        super(LookupTorrentURLTestCase, self).setUp()
        self.store = bittorrent.BittorrentStore()
        self.image_id = 'fakeimageid'

    def _stub_entry_points(self, entry_points):
        # Replace pkg_resources scanning with a canned extension list.
        self.stubs.Set(pkg_resources, 'iter_entry_points',
                       lambda namespace: list(entry_points))

    def test_default_fetch_url_no_base_url_set(self):
        """No base URL and no extension installed -> RuntimeError."""
        self.flags(torrent_base_url=None,
                   group='xenserver')
        self._stub_entry_points([])

        exc = self.assertRaises(
            RuntimeError, self.store._lookup_torrent_url_fn)
        self.assertEqual(_('Cannot create default bittorrent URL without'
                           ' torrent_base_url set'
                           ' or torrent URL fetcher extension'),
                         six.text_type(exc))

    def test_default_fetch_url_base_url_is_set(self):
        """A configured torrent_base_url wins over an installed extension."""
        self.flags(torrent_base_url='http://foo',
                   group='xenserver')
        self._stub_entry_points([MockEntryPoint()])

        lookup_fn = self.store._lookup_torrent_url_fn()
        self.assertEqual('http://foo/fakeimageid.torrent',
                         lookup_fn(self.image_id))

    def test_with_extension(self):
        """A single fetcher extension supplies the lookup function."""
        self._stub_entry_points([MockEntryPoint()])

        lookup_fn = self.store._lookup_torrent_url_fn()
        self.assertEqual("http://www.foobar.com/%s" % self.image_id,
                         lookup_fn(self.image_id))

    def test_multiple_extensions_found(self):
        """More than one fetcher extension is ambiguous -> RuntimeError."""
        self.flags(torrent_base_url=None,
                   group='xenserver')
        self._stub_entry_points([MockEntryPoint(), MockEntryPoint()])

        exc = self.assertRaises(
            RuntimeError, self.store._lookup_torrent_url_fn)
        self.assertEqual(_('Multiple torrent URL fetcher extensions found.'
                           ' Failing.'),
                         six.text_type(exc))
diff --git a/nova/tests/unit/virt/xenapi/image/test_glance.py b/nova/tests/unit/virt/xenapi/image/test_glance.py
new file mode 100644
index 0000000000..8fbb853efa
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_glance.py
@@ -0,0 +1,256 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+import time
+
+import mock
+from mox3 import mox
+
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import vm_utils
+
+
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
    """Tests for GlanceStore download/upload via the 'glance' xapi plugin.

    NOTE: most tests use mox; expectations must be recorded in the exact
    order they are consumed, before ReplayAll().
    """

    def setUp(self):
        super(TestGlanceStore, self).setUp()
        self.store = glance.GlanceStore()

        # Glance endpoint plus a (stubbed-out) XenAPI connection.
        self.flags(host='1.1.1.1',
                   port=123,
                   api_insecure=False, group='glance')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')

        fake.reset()
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session

        self.stubs.Set(
            vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')

        self.instance = {'uuid': 'blah',
                         'system_metadata': [],
                         'auto_disk_config': True,
                         'os_type': 'default',
                         'xenapi_use_agent': 'true'}

    def _get_params(self):
        """Base kwargs the store must pass to the glance plugin."""
        return {'image_id': 'fake_image_uuid',
                'glance_host': '1.1.1.1',
                'glance_port': 123,
                'glance_use_ssl': False,
                'sr_path': '/fake/sr/path',
                'extra_headers': {'X-Service-Catalog': '[]',
                                  'X-Auth-Token': 'foobar',
                                  'X-Roles': '',
                                  'X-Tenant-Id': 'project',
                                  'X-User-Id': 'user',
                                  'X-Identity-Status': 'Confirmed'}}

    def _get_download_params(self):
        """Download additionally carries the uuid_stack for new VHD names."""
        params = self._get_params()
        params['uuid_stack'] = ['uuid1']
        return params

    def test_download_image(self):
        """download_image issues a single download_vhd plugin call."""
        params = self._get_download_params()

        self.stubs.Set(vm_utils, '_make_uuid_stack',
                       lambda *a, **kw: ['uuid1'])

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'download_vhd', **params)
        self.mox.ReplayAll()

        self.store.download_image(self.context, self.session,
                                  self.instance, 'fake_image_uuid')

        self.mox.VerifyAll()

    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
    @mock.patch.object(random, 'shuffle')
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
                       'debug')
    def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep,
                                  mock_shuffle, mock_make_uuid_stack):
        """A RetryableError failure is retried against the next API server.

        random.shuffle is patched out so api_servers is consumed in its
        configured order; time.sleep is patched so no real backoff occurs.
        """
        params = self._get_download_params()
        self.flags(num_retries=2, group='glance')

        # On the retry path host/port come per-attempt from api_servers,
        # not from the base params.
        params.pop("glance_port")
        params.pop("glance_host")
        calls = [mock.call('glance', 'download_vhd', glance_port=9292,
                           glance_host='10.0.1.1', **params),
                 mock.call('glance', 'download_vhd', glance_port=9293,
                           glance_host='10.0.0.1', **params)]
        log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
                                          'attempts': 3, 'attempt': 1,
                                          'fn': 'download_vhd',
                                          'plugin': 'glance'}),
                     mock.call(mock.ANY, {'callback_result': '10.0.0.1',
                                          'attempts': 3, 'attempt': 2,
                                          'fn': 'download_vhd',
                                          'plugin': 'glance'})]

        glance_api_servers = ['10.0.1.1:9292',
                              'http://10.0.0.1:9293']
        self.flags(api_servers=glance_api_servers, group='glance')

        with (mock.patch.object(self.session, 'call_plugin_serialized')
              ) as mock_call_plugin_serialized:
            error_details = ["", "", "RetryableError", ""]
            error = self.session.XenAPI.Failure(details=error_details)
            # First attempt fails with a retryable error, second succeeds.
            mock_call_plugin_serialized.side_effect = [error, "success"]

            self.store.download_image(self.context, self.session,
                                      self.instance, 'fake_image_uuid')

            mock_call_plugin_serialized.assert_has_calls(calls)
            mock_log_debug.assert_has_calls(log_calls, any_order=True)

            # The one failed attempt is recorded as an instance fault.
            self.assertEqual(1, mock_fault.call_count)

    def _get_upload_params(self, auto_disk_config=True,
                           expected_os_type='default'):
        """Upload additionally carries the VDI list and image properties."""
        params = self._get_params()
        params['vdi_uuids'] = ['fake_vdi_uuid']
        params['properties'] = {'auto_disk_config': auto_disk_config,
                                'os_type': expected_os_type}
        return params

    def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
        # Shared driver: expect exactly one upload_vhd plugin call with
        # properties derived from self.instance.
        params = self._get_upload_params(auto_disk_config, expected_os_type)

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'upload_vhd', **params)

        self.mox.ReplayAll()
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])
        self.mox.VerifyAll()

    def test_upload_image(self):
        self._test_upload_image(True)

    def test_upload_image_None_os_type(self):
        # A None os_type appears as 'linux' in the uploaded properties.
        self.instance['os_type'] = None
        self._test_upload_image(True, 'linux')

    def test_upload_image_no_os_type(self):
        # A missing os_type likewise appears as 'linux'.
        del self.instance['os_type']
        self._test_upload_image(True, 'linux')

    def test_upload_image_auto_config_disk_disabled(self):
        # image_auto_disk_config="Disabled" in system metadata is passed
        # through as the string "disabled".
        sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
        self.instance["system_metadata"] = sys_meta
        self._test_upload_image("disabled")

    def test_upload_image_raises_exception(self):
        """A non-XenAPI error from the plugin propagates unchanged."""
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(RuntimeError)
        self.mox.ReplayAll()

        self.assertRaises(RuntimeError, self.store.upload_image,
                          self.context, self.session, self.instance,
                          'fake_image_uuid', ['fake_vdi_uuid'])
        self.mox.VerifyAll()

    def test_upload_image_retries_then_raises_exception(self):
        """Retryable failures back off and retry, then wrap on exhaustion.

        With num_retries=2 there are three attempts; each failure records
        an instance fault, and exhaustion raises CouldNotUploadImage.
        """
        self.flags(num_retries=2, group='glance')
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.mox.StubOutWithMock(time, 'sleep')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        error_details = ["", "", "RetryableError", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(0.5)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(1)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        self.mox.ReplayAll()

        self.assertRaises(exception.CouldNotUploadImage,
                          self.store.upload_image,
                          self.context, self.session, self.instance,
                          'fake_image_uuid', ['fake_vdi_uuid'])
        self.mox.VerifyAll()

    def test_upload_image_retries_on_signal_exception(self):
        """'task signaled' failures (either XenServer wording) retry."""
        self.flags(num_retries=2, group='glance')
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.mox.StubOutWithMock(time, 'sleep')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        error_details = ["", "task signaled", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(0.5)
        # Note(johngarbutt) XenServer 6.1 and later has this error
        error_details = ["", "signal: SIGTERM", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                                                  error, (fake.Failure,
                                                          error,
                                                          mox.IgnoreArg()))
        time.sleep(1)
        # Third attempt succeeds (no AndRaise), so upload_image returns.
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params)
        self.mox.ReplayAll()

        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])
        self.mox.VerifyAll()
diff --git a/nova/tests/unit/virt/xenapi/image/test_utils.py b/nova/tests/unit/virt/xenapi/image/test_utils.py
new file mode 100644
index 0000000000..4763f66683
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_utils.py
@@ -0,0 +1,252 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tarfile
+
+import mock
+
+from nova import test
+from nova.virt.xenapi.image import utils
+
+
@mock.patch.object(utils, 'IMAGE_API')
class GlanceImageTestCase(test.NoDBTestCase):
    """GlanceImage is a thin per-image wrapper over the image API."""

    def _get_image(self):
        return utils.GlanceImage(mock.sentinel.context,
                                 mock.sentinel.image_ref)

    def test_meta(self, mock_image_api):
        mock_image_api.get.return_value = mock.sentinel.meta

        self.assertEqual(mock.sentinel.meta, self._get_image().meta)
        mock_image_api.get.assert_called_once_with(mock.sentinel.context,
                                                   mock.sentinel.image_ref)

    def test_download_to(self, mock_image_api):
        mock_image_api.download.return_value = None

        self.assertIsNone(self._get_image().download_to(mock.sentinel.fobj))
        mock_image_api.download.assert_called_once_with(
            mock.sentinel.context, mock.sentinel.image_ref,
            mock.sentinel.fobj)

    def test_is_raw_tgz_empty_meta(self, mock_image_api):
        mock_image_api.get.return_value = {}

        self.assertEqual(False, self._get_image().is_raw_tgz())

    def test_is_raw_tgz_for_raw_tgz(self, mock_image_api):
        mock_image_api.get.return_value = {'disk_format': 'raw',
                                           'container_format': 'tgz'}

        self.assertEqual(True, self._get_image().is_raw_tgz())

    def test_data(self, mock_image_api):
        mock_image_api.download.return_value = mock.sentinel.image

        self.assertEqual(mock.sentinel.image, self._get_image().data())
+
+
class RawImageTestCase(test.NoDBTestCase):
    """Tests for RawImage (mox-based; expectations are order sensitive)."""

    def test_get_size(self):
        """get_size() returns the glance 'size' metadata (asserted as int)."""
        glance_image = self.mox.CreateMock(utils.GlanceImage)
        glance_image.meta = {'size': '123'}
        raw_image = utils.RawImage(glance_image)
        self.mox.ReplayAll()

        self.assertEqual(123, raw_image.get_size())

    def test_stream_to(self):
        """stream_to() delegates to GlanceImage.download_to."""
        glance_image = self.mox.CreateMock(utils.GlanceImage)
        glance_image.download_to('file').AndReturn('result')
        raw_image = utils.RawImage(glance_image)
        self.mox.ReplayAll()

        self.assertEqual('result', raw_image.stream_to('file'))
+
+
class TestIterableBasedFile(test.NoDBTestCase):
    """IterableToFileAdapter exposes an iterable of chunks as read()."""

    @staticmethod
    def _make_adapter(chunks):
        # Tiny factory so each test reads as a single line of setup.
        return utils.IterableToFileAdapter(chunks)

    def test_constructor(self):
        class FakeIterable(object):
            def __iter__(_self):
                return 'iterator'

        the_file = self._make_adapter(FakeIterable())

        self.assertEqual('iterator', the_file.iterator)

    def test_read_one_character(self):
        adapter = self._make_adapter(['chunk1', 'chunk2'])

        self.assertEqual('c', adapter.read(1))

    def test_read_stores_remaining_characters(self):
        adapter = self._make_adapter(['chunk1', 'chunk2'])
        adapter.read(1)

        self.assertEqual('hunk1', adapter.remaining_data)

    def test_read_remaining_characters(self):
        adapter = self._make_adapter(['chunk1', 'chunk2'])

        self.assertEqual('c', adapter.read(1))
        self.assertEqual('h', adapter.read(1))

    def test_read_reached_end_of_file(self):
        adapter = self._make_adapter(['chunk1', 'chunk2'])

        self.assertEqual('chunk1', adapter.read(100))
        self.assertEqual('chunk2', adapter.read(100))
        self.assertEqual('', adapter.read(100))

    def test_empty_chunks(self):
        adapter = self._make_adapter(['', '', 'chunk2'])

        self.assertEqual('chunk2', adapter.read(100))
+
+
class RawTGZTestCase(test.NoDBTestCase):
    """Tests for RawTGZImage (mox-based; expectations are order sensitive)."""

    def test_as_tarfile(self):
        """_as_tarfile opens the glance stream as a streaming gzip tar."""
        image = utils.RawTGZImage(None)
        self.mox.StubOutWithMock(image, '_as_file')
        self.mox.StubOutWithMock(utils.tarfile, 'open')

        image._as_file().AndReturn('the_file')
        utils.tarfile.open(mode='r|gz', fileobj='the_file').AndReturn('tf')

        self.mox.ReplayAll()

        result = image._as_tarfile()
        self.assertEqual('tf', result)

    def test_as_file(self):
        """_as_file adapts the iterable glance data into a file object."""
        self.mox.StubOutWithMock(utils, 'IterableToFileAdapter')
        glance_image = self.mox.CreateMock(utils.GlanceImage)
        image = utils.RawTGZImage(glance_image)
        glance_image.data().AndReturn('iterable-data')
        utils.IterableToFileAdapter('iterable-data').AndReturn('data-as-file')

        self.mox.ReplayAll()

        result = image._as_file()

        self.assertEqual('data-as-file', result)

    def test_get_size(self):
        """get_size reads the first tar member's size and caches handles."""
        tar_file = self.mox.CreateMock(tarfile.TarFile)
        tar_info = self.mox.CreateMock(tarfile.TarInfo)

        image = utils.RawTGZImage(None)

        self.mox.StubOutWithMock(image, '_as_tarfile')

        image._as_tarfile().AndReturn(tar_file)
        tar_file.next().AndReturn(tar_info)
        tar_info.size = 124

        self.mox.ReplayAll()

        result = image.get_size()

        self.assertEqual(124, result)
        self.assertEqual(image._tar_info, tar_info)
        self.assertEqual(image._tar_file, tar_file)

    def test_get_size_called_twice(self):
        """A second get_size call reuses the cached tar handles."""
        tar_file = self.mox.CreateMock(tarfile.TarFile)
        tar_info = self.mox.CreateMock(tarfile.TarInfo)

        image = utils.RawTGZImage(None)

        self.mox.StubOutWithMock(image, '_as_tarfile')

        # _as_tarfile/next are expected exactly once despite two calls.
        image._as_tarfile().AndReturn(tar_file)
        tar_file.next().AndReturn(tar_info)
        tar_info.size = 124

        self.mox.ReplayAll()

        image.get_size()
        result = image.get_size()

        self.assertEqual(124, result)
        self.assertEqual(image._tar_info, tar_info)
        self.assertEqual(image._tar_file, tar_file)

    def test_stream_to_without_size_retrieved(self):
        """stream_to extracts the first member and closes the source tar."""
        source_tar = self.mox.CreateMock(tarfile.TarFile)
        first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
        # NOTE(review): `file` is the Python 2 builtin type; under Python 3
        # this would need io.IOBase or similar — confirm when porting.
        target_file = self.mox.CreateMock(file)
        source_file = self.mox.CreateMock(file)

        image = utils.RawTGZImage(None)
        image._image_service_and_image_id = ('service', 'id')

        # NOTE(review): the third positional argument lands in
        # StubOutWithMock's use_mock_anything flag, not as a replacement;
        # source_tar is merely truthy here — confirm this is intended.
        self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
        self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')

        image._as_tarfile().AndReturn(source_tar)
        source_tar.next().AndReturn(first_tarinfo)
        source_tar.extractfile(first_tarinfo).AndReturn(source_file)
        utils.shutil.copyfileobj(source_file, target_file)
        source_tar.close()

        self.mox.ReplayAll()

        image.stream_to(target_file)

    def test_stream_to_with_size_retrieved(self):
        """stream_to after get_size reuses the already-read first member."""
        source_tar = self.mox.CreateMock(tarfile.TarFile)
        first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
        target_file = self.mox.CreateMock(file)
        source_file = self.mox.CreateMock(file)
        first_tarinfo.size = 124

        image = utils.RawTGZImage(None)
        image._image_service_and_image_id = ('service', 'id')

        self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
        self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')

        image._as_tarfile().AndReturn(source_tar)
        source_tar.next().AndReturn(first_tarinfo)
        source_tar.extractfile(first_tarinfo).AndReturn(source_file)
        utils.shutil.copyfileobj(source_file, target_file)
        source_tar.close()

        self.mox.ReplayAll()

        image.get_size()
        image.stream_to(target_file)
diff --git a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
new file mode 100644
index 0000000000..4a86ce5371
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
@@ -0,0 +1,182 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import tarfile
+
+import eventlet
+
+from nova.image import glance
+from nova import test
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi.image import vdi_through_dev
+
+
@contextlib.contextmanager
def fake_context(result=None):
    """Context manager stub that yields *result* and performs no cleanup."""
    yield result
+
+
class TestDelegatingToCommand(test.NoDBTestCase):
    """VdiThroughDevStore should delegate uploads to a command object."""

    def test_upload_image_is_delegated_to_command(self):
        """upload_image builds an UploadToGlanceAsRawTgz and runs it."""
        command = self.mox.CreateMock(vdi_through_dev.UploadToGlanceAsRawTgz)
        self.mox.StubOutWithMock(vdi_through_dev, 'UploadToGlanceAsRawTgz')
        vdi_through_dev.UploadToGlanceAsRawTgz(
            'ctx', 'session', 'instance', 'image_id', 'vdis').AndReturn(
            command)
        command.upload_image().AndReturn('result')
        self.mox.ReplayAll()

        store = vdi_through_dev.VdiThroughDevStore()
        result = store.upload_image(
            'ctx', 'session', 'instance', 'image_id', 'vdis')

        self.assertEqual('result', result)
+
+
class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
    """Tests for the UploadToGlanceAsRawTgz command (mox-based)."""

    def test_upload_image(self):
        """upload_image attaches the VDI, chowns the device, then uploads."""
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(store, '_perform_upload')
        self.mox.StubOutWithMock(store, '_get_vdi_ref')
        self.mox.StubOutWithMock(vdi_through_dev, 'glance')
        self.mox.StubOutWithMock(vdi_through_dev, 'vm_utils')
        self.mox.StubOutWithMock(vdi_through_dev, 'utils')

        store._get_vdi_ref().AndReturn('vdi_ref')
        vdi_through_dev.vm_utils.vdi_attached_here(
            'session', 'vdi_ref', read_only=True).AndReturn(
            fake_context('dev'))
        vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath')
        vdi_through_dev.utils.temporary_chown('devpath').AndReturn(
            fake_context())
        store._perform_upload('devpath')

        self.mox.ReplayAll()

        store.upload_image()

    def test__perform_upload(self):
        """_perform_upload pipes a TarGzProducer into UpdateGlanceImage."""
        producer = self.mox.CreateMock(vdi_through_dev.TarGzProducer)
        consumer = self.mox.CreateMock(glance.UpdateGlanceImage)
        pool = self.mox.CreateMock(eventlet.GreenPool)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(store, '_create_pipe')
        self.mox.StubOutWithMock(store, '_get_virtual_size')
        self.mox.StubOutWithMock(producer, 'get_metadata')
        self.mox.StubOutWithMock(vdi_through_dev, 'TarGzProducer')
        self.mox.StubOutWithMock(glance, 'UpdateGlanceImage')
        self.mox.StubOutWithMock(vdi_through_dev, 'eventlet')

        producer.get_metadata().AndReturn('metadata')
        store._get_virtual_size().AndReturn('324')
        store._create_pipe().AndReturn(('readfile', 'writefile'))
        vdi_through_dev.TarGzProducer(
            'devpath', 'writefile', '324', 'disk.raw').AndReturn(
            producer)
        glance.UpdateGlanceImage('context', 'id', 'metadata',
                                 'readfile').AndReturn(consumer)
        vdi_through_dev.eventlet.GreenPool().AndReturn(pool)
        # Producer and consumer run concurrently in the green pool.
        pool.spawn(producer.start)
        pool.spawn(consumer.start)
        pool.waitall()

        self.mox.ReplayAll()

        store._perform_upload('devpath')

    def test__get_vdi_ref(self):
        """_get_vdi_ref resolves the first VDI uuid via XenAPI."""
        session = self.mox.CreateMock(xenapi_session.XenAPISession)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
        session.call_xenapi('VDI.get_by_uuid', 'vdi0').AndReturn('vdi_ref')

        self.mox.ReplayAll()

        self.assertEqual('vdi_ref', store._get_vdi_ref())

    def test__get_virtual_size(self):
        """_get_virtual_size asks XenAPI for the VDI's virtual size."""
        session = self.mox.CreateMock(xenapi_session.XenAPISession)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(store, '_get_vdi_ref')
        store._get_vdi_ref().AndReturn('vdi_ref')
        session.call_xenapi('VDI.get_virtual_size', 'vdi_ref')

        self.mox.ReplayAll()

        store._get_virtual_size()

    def test__create_pipe(self):
        """_create_pipe wraps os.pipe ends in unbuffered green files."""
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(vdi_through_dev, 'os')
        self.mox.StubOutWithMock(vdi_through_dev, 'greenio')
        vdi_through_dev.os.pipe().AndReturn(('rpipe', 'wpipe'))
        vdi_through_dev.greenio.GreenPipe('rpipe', 'rb', 0).AndReturn('rfile')
        vdi_through_dev.greenio.GreenPipe('wpipe', 'wb', 0).AndReturn('wfile')

        self.mox.ReplayAll()

        result = store._create_pipe()
        self.assertEqual(('rfile', 'wfile'), result)
+
+
class TestTarGzProducer(test.NoDBTestCase):
    """Tests for TarGzProducer, which tars a device file into a pipe."""

    def test_constructor(self):
        producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
                                                 '100', 'fname')

        self.assertEqual('devpath', producer.fpath)
        self.assertEqual('writefile', producer.output)
        self.assertEqual('100', producer.size)
        # NOTE(review): duplicate assertion — 'output' is checked twice
        # while the 'fname' constructor argument is never verified; this
        # probably should assert the stored filename instead — confirm
        # the attribute name on TarGzProducer before changing.
        self.assertEqual('writefile', producer.output)

    def test_start(self):
        """start() writes one gzip'd tar member then closes the output."""
        outf = self.mox.CreateMock(file)
        producer = vdi_through_dev.TarGzProducer('fpath', outf,
                                                 '100', 'fname')

        tfile = self.mox.CreateMock(tarfile.TarFile)
        tinfo = self.mox.CreateMock(tarfile.TarInfo)

        inf = self.mox.CreateMock(file)

        self.mox.StubOutWithMock(vdi_through_dev, 'tarfile')
        self.mox.StubOutWithMock(producer, '_open_file')

        vdi_through_dev.tarfile.TarInfo(name='fname').AndReturn(tinfo)
        vdi_through_dev.tarfile.open(fileobj=outf, mode='w|gz').AndReturn(
            fake_context(tfile))
        producer._open_file('fpath', 'rb').AndReturn(fake_context(inf))
        tfile.addfile(tinfo, fileobj=inf)
        outf.close()

        self.mox.ReplayAll()

        producer.start()

        # The string size '100' must have been coerced to the int tar needs.
        self.assertEqual(100, tinfo.size)

    def test_get_metadata(self):
        producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
                                                 '100', 'fname')

        self.assertEqual({
            'disk_format': 'raw',
            'container_format': 'tgz'},
            producer.get_metadata())
diff --git a/nova/tests/unit/virt/xenapi/stubs.py b/nova/tests/unit/virt/xenapi/stubs.py
new file mode 100644
index 0000000000..ad13ca41df
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/stubs.py
@@ -0,0 +1,365 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite."""
+
+import pickle
+import random
+
+from oslo.serialization import jsonutils
+
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.xenapi.client import session
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+
+
def stubout_firewall_driver(stubs, conn):
    """No-op the connection's firewall driver.

    Replaces prepare_instance_filter and instance_filter_exists so VM
    tests do not depend on iptables behaviour.
    """

    def fake_none(self, *args):
        return

    _vmops = conn._vmops
    stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
    stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
+
+
def stubout_instance_snapshot(stubs):
    """Stub out image fetching and VHD coalescing for snapshot tests."""
    def fake_fetch_image(context, session, instance, name_label, image, type):
        # Return a freshly created fake VDI for each disk role.
        return {'root': dict(uuid=_make_fake_vdi(), file=None),
                'kernel': dict(uuid=_make_fake_vdi(), file=None),
                'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}

    stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

    def fake_wait_for_vhd_coalesce(*args):
        # TODO(sirp): Should we actually fake out the data here
        return "fakeparent", "fakebase"

    stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
+
+
def stubout_session(stubs, cls, product_version=(5, 6, 2),
                    product_brand='XenServer', **opt_args):
    """Stubs out methods from XenAPISession.

    Session creation returns an instance of ``cls`` (built with
    ``opt_args``) and the product version/brand probe returns the
    supplied canned values.
    """
    def _fake_create_session(self, url):
        return cls(url, **opt_args)

    def _fake_get_version_and_brand(self):
        return (product_version, product_brand)

    stubs.Set(session.XenAPISession, '_create_session',
              _fake_create_session)
    stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
              _fake_get_version_and_brand)
+
+
def stubout_get_this_vm_uuid(stubs):
    """Return the uuid of the fake control domain (dom0) VM."""
    def f(session):
        # Pick the first VM record flagged as the control domain.
        vms = [rec['uuid'] for ref, rec
               in fake.get_all_records('VM').iteritems()
               if rec['is_control_domain']]
        return vms[0]
    stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
def stubout_image_service_download(stubs):
    """Make the fake image service's download a no-op."""
    def _noop_download(*args, **kwargs):
        pass

    stubs.Set(nova.tests.unit.image.fake._FakeImageService,
              'download', _noop_download)
+
+
def stubout_stream_disk(stubs):
    """Replace vm_utils._stream_disk with a no-op."""
    def _noop_stream_disk(*args, **kwargs):
        pass

    stubs.Set(vm_utils, '_stream_disk', _noop_stream_disk)
+
+
def stubout_determine_is_pv_objectstore(stubs):
    """Assume no VM uses a PV kernel: the stub always reports False.

    (The original docstring was garbled: "Assumes VMs stu have PV
    kernels".)
    """

    def f(*args):
        return False
    stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
+
+
def stubout_is_snapshot(stubs):
    """Make vm_utils.is_snapshot always return True.

    The xenapi fake driver does not create vmrefs for snapshots, so the
    real check cannot work against the fake datastore.
    """

    def f(*args):
        return True
    stubs.Set(vm_utils, 'is_snapshot', f)
+
+
def stubout_lookup_image(stubs):
    """Simulates a failure in lookup image."""
    def f(_1, _2, _3, _4):
        # Generic Exception on purpose: callers only need an error to
        # propagate, not a specific type.
        raise Exception("Test Exception raised by fake lookup_image")
    stubs.Set(vm_utils, 'lookup_image', f)
+
+
def stubout_fetch_disk_image(stubs, raise_failure=False):
    """Simulates a failure in fetch image_glance_disk.

    :param raise_failure: when True the stub raises a XenAPI-style
        failure; otherwise it returns a fake VDI mapping keyed by the
        image type's string name.
    """

    def _fake_fetch_disk_image(context, session, instance, name_label, image,
                               image_type):
        if raise_failure:
            raise fake.Failure("Test Exception raised by "
                               "fake fetch_image_glance_disk")
        elif image_type == vm_utils.ImageType.KERNEL:
            filename = "kernel"
        elif image_type == vm_utils.ImageType.RAMDISK:
            filename = "ramdisk"
        else:
            filename = "unknown"

        vdi_type = vm_utils.ImageType.to_string(image_type)
        return {vdi_type: dict(uuid=None, file=filename)}

    stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
+
+
def stubout_create_vm(stubs):
    """Simulates a failure in create_vm."""

    def _raise_create_vm_failure(*args):
        raise fake.Failure("Test Exception raised by fake create_vm")

    stubs.Set(vm_utils, 'create_vm', _raise_create_vm_failure)
+
+
def stubout_attach_disks(stubs):
    """Simulates a failure in _attach_disks."""

    def f(*args):
        # Raised as a XenAPI-style failure so callers exercise their
        # XenAPI error-handling path.
        raise fake.Failure("Test Exception raised by fake _attach_disks")
    stubs.Set(vmops.VMOps, '_attach_disks', f)
+
+
def _make_fake_vdi():
    """Create a VDI in the first fake SR and return its uuid."""
    sr_ref = fake.get_all('SR')[0]
    vdi_ref = fake.create_vdi('', sr_ref)
    vdi_rec = fake.get_record('VDI', vdi_ref)
    return vdi_rec['uuid']
+
+
class FakeSessionForVMTests(fake.SessionBase):
    """Stubs out a XenAPISession for VM tests."""

    # Minimal but well-formed `iptables-save` output, returned by the
    # fake "xenhost"/"iptables_config" plugin call below.
    _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
                                  "Sun Nov 6 22:49:02 2011\n"
                                  "*filter\n"
                                  ":INPUT ACCEPT [0:0]\n"
                                  ":FORWARD ACCEPT [0:0]\n"
                                  ":OUTPUT ACCEPT [0:0]\n"
                                  "COMMIT\n"
                                  "# Completed on Sun Nov 6 22:49:02 2011\n")

    def host_call_plugin(self, _1, _2, plugin, method, _5):
        # Fake only the two plugin calls the VM tests exercise; anything
        # else falls through to the generic SessionBase behaviour.
        if (plugin, method) == ('glance', 'download_vhd'):
            root_uuid = _make_fake_vdi()
            return pickle.dumps(dict(root=dict(uuid=root_uuid)))
        elif (plugin, method) == ("xenhost", "iptables_config"):
            return fake.as_json(out=self._fake_iptables_save_output,
                                err='')
        else:
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, _5))

    def VM_start(self, _1, ref, _2, _3):
        # Mirror XenAPI semantics: starting a VM that is not Halted is a
        # VM_BAD_POWER_STATE failure.
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False
        # Any non-zero 16-bit value serves as a fake domain id.
        vm['domid'] = random.randrange(1, 1 << 16)
        return vm

    def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
        # NOTE(review): unlike VM_start this returns None; callers in the
        # tests ignore the result.
        vm_rec = self.VM_start(_1, vm_ref, _2, _3)
        vm_rec['resident_on'] = host_ref

    def VDI_snapshot(self, session_ref, vm_ref, _1):
        sr_ref = "fakesr"
        return fake.create_vdi('fakelabel', sr_ref, read_only=True)

    def SR_scan(self, session_ref, sr_ref):
        pass
+
+
class FakeSessionForFirewallTests(FakeSessionForVMTests):
    """Stubs out a XenApi Session for doing IPTable Firewall tests."""

    def __init__(self, uri, test_case=None):
        super(FakeSessionForFirewallTests, self).__init__(uri)
        # Pick up canned iptables rules from the test case, if supplied.
        if hasattr(test_case, '_in_rules'):
            self._in_rules = test_case._in_rules
        if hasattr(test_case, '_in6_filter_rules'):
            self._in6_filter_rules = test_case._in6_filter_rules
        self._test_case = test_case

    def host_call_plugin(self, _1, _2, plugin, method, args):
        """Mock method for host_call_plugin to be used in unit tests
        for the dom0 iptables Firewall drivers for XenAPI

        """
        if plugin == "xenhost" and method == "iptables_config":
            # The command to execute is a json-encoded list
            cmd_args = args.get('cmd_args', None)
            cmd = jsonutils.loads(cmd_args)
            if not cmd:
                ret_str = ''
            else:
                output = ''
                process_input = args.get('process_input', None)
                if cmd == ['ip6tables-save', '-c']:
                    output = '\n'.join(self._in6_filter_rules)
                if cmd == ['iptables-save', '-c']:
                    output = '\n'.join(self._in_rules)
                if cmd == ['iptables-restore', '-c', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        if self._test_case is not None:
                            # Expose the restored rules so the test can
                            # assert on what would have been applied.
                            self._test_case._out_rules = lines
                        output = '\n'.join(lines)
                if cmd == ['ip6tables-restore', '-c', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        output = '\n'.join(lines)
                ret_str = fake.as_json(out=output, err='')
            return ret_str
        else:
            # NOTE(review): this deliberately names FakeSessionForVMTests
            # in super(), skipping that class's host_call_plugin so that
            # non-iptables calls reach plain SessionBase -- TODO confirm
            # this is intended rather than a copy/paste slip.
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, args))
+
+
def stub_out_vm_methods(stubs):
    """No-op the bootlock, ephemeral-disk and device-wait helpers.

    All four originals were identical no-ops, so a single accepting-any
    fake is substituted for each of them.
    """
    def _noop(*args, **kwargs):
        pass

    stubs.Set(vmops.VMOps, "_acquire_bootlock", _noop)
    stubs.Set(vmops.VMOps, "_release_bootlock", _noop)
    stubs.Set(vm_utils, 'generate_ephemeral', _noop)
    stubs.Set(vm_utils, '_wait_for_device', _noop)
+
+
class FakeSessionForVolumeTests(fake.SessionBase):
    """Stubs out a XenAPISession for Volume tests."""
    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # Succeed only if a VDI with the given uuid already exists in the
        # fake datastore; otherwise raise INVALID_VDI like a real session.
        valid_vdi = False
        refs = fake.get_all('VDI')
        for ref in refs:
            rec = fake.get_record('VDI', ref)
            if rec['uuid'] == uuid:
                valid_vdi = True
        if not valid_vdi:
            raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
    """Stubs out a XenAPISession for Volume tests: it injects failures."""
    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # This is for testing failure
        raise fake.Failure([['INVALID_VDI', 'session', self._session]])

    def PBD_unplug(self, _1, ref):
        # Unplug always succeeds so cleanup paths can run after the
        # injected VDI_introduce failure.
        rec = fake.get_record('PBD', ref)
        rec['currently-attached'] = False

    def SR_forget(self, _1, ref):
        pass
+
+
def stub_out_migration_methods(stubs):
    """Stub the vmops/vm_utils helpers used by the migration tests.

    All fakes operate against a single fake SR created up front.
    """
    fakesr = fake.create_sr()

    def fake_import_all_migrated_disks(session, instance):
        # Fabricate a migrated root disk; no ephemerals.
        vdi_ref = fake.create_vdi(instance['name'], fakesr)
        vdi_rec = fake.get_record('VDI', vdi_ref)
        vdi_rec['other_config']['nova_disk_type'] = 'root'
        return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref},
                "ephemerals": {}}

    def fake_wait_for_instance_to_start(self, *args):
        pass

    def fake_get_vdi(session, vm_ref, userdevice='0'):
        # Build a parent/child VHD chain so coalesce logic has a
        # 'vhd-parent' entry to inspect.
        vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
        vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
        vdi_ref = fake.create_vdi('derp', fakesr,
                sm_config={'vhd-parent': vdi_rec_parent['uuid']})
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        return vdi_ref, vdi_rec

    def fake_sr(session, *args):
        return fakesr

    def fake_get_sr_path(*args):
        return "fake"

    def fake_destroy(*args, **kwargs):
        pass

    def fake_generate_ephemeral(*args):
        pass

    stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
    stubs.Set(vmops.VMOps, '_wait_for_instance_to_start',
              fake_wait_for_instance_to_start)
    stubs.Set(vm_utils, 'import_all_migrated_disks',
              fake_import_all_migrated_disks)
    stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
    stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
    stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
    stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+
class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
    """Session whose migration-related calls all raise XenAPI failures."""

    def VM_assert_can_migrate(self, session, vmref, migrate_data,
                              live, vdi_map, vif_map, options):
        raise fake.Failure("XenAPI VM.assert_can_migrate failed")

    def host_migrate_receive(self, session, hostref, networkref, options):
        raise fake.Failure("XenAPI host.migrate_receive failed")

    def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
                        vif_map, options):
        raise fake.Failure("XenAPI VM.migrate_send failed")
+
+
+# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
+# over to use XenAPITestBaseNoDB
class XenAPITestBase(test.TestCase):
    """DB-backed base class for XenAPI tests.

    Swaps the XenAPI module for the fake driver and resets the fake's
    in-memory state before each test.
    """
    def setUp(self):
        super(XenAPITestBase, self).setUp()
        self.useFixture(test.ReplaceModule('XenAPI', fake))
        fake.reset()
+
+
class XenAPITestBaseNoDB(test.NoDBTestCase):
    """No-DB base class for XenAPI tests; preferred over XenAPITestBase.

    Swaps the XenAPI module for the fake driver and resets the fake's
    in-memory state before each test.
    """
    def setUp(self):
        super(XenAPITestBaseNoDB, self).setUp()
        self.useFixture(test.ReplaceModule('XenAPI', fake))
        fake.reset()
diff --git a/nova/tests/unit/virt/xenapi/test_agent.py b/nova/tests/unit/virt/xenapi/test_agent.py
new file mode 100644
index 0000000000..5004b381d4
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_agent.py
@@ -0,0 +1,468 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import time
+import uuid
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.xenapi import agent
+from nova.virt.xenapi import fake as xenapi_fake
+
+
+def _get_fake_instance(**kwargs):
+ system_metadata = []
+ for k, v in kwargs.items():
+ system_metadata.append({
+ "key": k,
+ "value": v
+ })
+
+ return {
+ "system_metadata": system_metadata,
+ "uuid": "uuid",
+ "key_data": "ssh-rsa asdf",
+ "os_type": "asdf",
+ }
+
+
class AgentTestCaseBase(test.NoDBTestCase):
    """Common helper for building a XenAPIBasedAgent under test."""

    def _create_agent(self, instance, session="session"):
        # Stash the collaborators so individual tests can assert against
        # them later (e.g. self.vm_ref).
        self.session = session
        self.virtapi = "virtapi"
        self.vm_ref = "vm_ref"
        return agent.XenAPIBasedAgent(self.session, self.virtapi,
                                      instance, self.vm_ref)
+
+
class AgentImageFlagsTestCase(AgentTestCaseBase):
    """Tests for agent.should_use_agent() flag resolution."""

    def test_agent_is_present(self):
        self.flags(use_agent_default=False, group='xenserver')
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "true"}]}
        self.assertTrue(agent.should_use_agent(instance))

    def test_agent_is_disabled(self):
        self.flags(use_agent_default=True, group='xenserver')
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "false"}]}
        self.assertFalse(agent.should_use_agent(instance))

    def test_agent_uses_default_when_prop_invalid(self):
        # Fix: method name was previously misspelled ("deafault").
        self.flags(use_agent_default=True, group='xenserver')
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "bob"}],
                    "uuid": "uuid"}
        self.assertTrue(agent.should_use_agent(instance))

    def test_agent_default_not_present(self):
        self.flags(use_agent_default=False, group='xenserver')
        instance = {"system_metadata": []}
        self.assertFalse(agent.should_use_agent(instance))

    def test_agent_default_present(self):
        self.flags(use_agent_default=True, group='xenserver')
        instance = {"system_metadata": []}
        self.assertTrue(agent.should_use_agent(instance))
+
+
class SysMetaKeyTestBase():
    """Mixin: shared tests for boolean system-metadata keys.

    Subclasses set `key` and mix this in with AgentTestCaseBase.
    """
    # system_metadata key under test; set by subclasses
    key = None

    def _create_agent_with_value(self, value):
        kwargs = {self.key: value}
        instance = _get_fake_instance(**kwargs)
        return self._create_agent(instance)

    def test_get_sys_meta_key_true(self):
        agent = self._create_agent_with_value("true")
        self.assertTrue(agent._get_sys_meta_key(self.key))

    def test_get_sys_meta_key_false(self):
        # Case-insensitive: "False" must be treated as falsy.
        agent = self._create_agent_with_value("False")
        self.assertFalse(agent._get_sys_meta_key(self.key))

    def test_get_sys_meta_key_invalid_is_false(self):
        agent = self._create_agent_with_value("invalid")
        self.assertFalse(agent._get_sys_meta_key(self.key))

    def test_get_sys_meta_key_missing_is_false(self):
        instance = _get_fake_instance()
        agent = self._create_agent(instance)
        self.assertFalse(agent._get_sys_meta_key(self.key))
+
+
class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
    """Exercises the skip-ssh-inject system-metadata flag."""
    key = "image_xenapi_skip_agent_inject_ssh"

    def test_skip_ssh_key_inject(self):
        agent = self._create_agent_with_value("True")
        self.assertTrue(agent._skip_ssh_key_inject())
+
+
class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
    """Exercises the skip-file-inject-at-boot system-metadata flag."""
    key = "image_xenapi_skip_agent_inject_files_at_boot"

    def test_skip_inject_files_at_boot(self):
        agent = self._create_agent_with_value("True")
        self.assertTrue(agent._skip_inject_files_at_boot())
+
+
class InjectSshTestCase(AgentTestCaseBase):
    """Tests for XenAPIBasedAgent.inject_ssh_key()."""

    def test_inject_ssh_key_succeeds(self):
        instance = _get_fake_instance()
        agent = self._create_agent(instance)
        self.mox.StubOutWithMock(agent, "inject_file")

        # Recorded mox expectation: the key from instance['key_data'] is
        # appended to root's authorized_keys.
        agent.inject_file("/root/.ssh/authorized_keys",
                          "\n# The following ssh key was injected by Nova"
                          "\nssh-rsa asdf\n")

        self.mox.ReplayAll()
        agent.inject_ssh_key()

    def _test_inject_ssh_key_skipped(self, instance):
        agent = self._create_agent(instance)

        # make sure its not called
        self.mox.StubOutWithMock(agent, "inject_file")
        self.mox.ReplayAll()

        agent.inject_ssh_key()

    def test_inject_ssh_key_skipped_no_key_data(self):
        instance = _get_fake_instance()
        instance["key_data"] = None
        self._test_inject_ssh_key_skipped(instance)

    def test_inject_ssh_key_skipped_windows(self):
        instance = _get_fake_instance()
        instance["os_type"] = "windows"
        self._test_inject_ssh_key_skipped(instance)

    def test_inject_ssh_key_skipped_cloud_init_present(self):
        instance = _get_fake_instance(
            image_xenapi_skip_agent_inject_ssh="True")
        self._test_inject_ssh_key_skipped(instance)
+
+
class FileInjectionTestCase(AgentTestCaseBase):
    """Tests for inject_file()/inject_files() on the agent."""

    def test_inject_file(self):
        instance = _get_fake_instance()
        agent = self._create_agent(instance)
        self.mox.StubOutWithMock(agent, "_call_agent")

        # Path and contents are base64-encoded before hitting the agent
        # plugin.
        b64_path = base64.b64encode('path')
        b64_contents = base64.b64encode('contents')
        agent._call_agent('inject_file',
                          {'b64_contents': b64_contents,
                           'b64_path': b64_path})

        self.mox.ReplayAll()

        agent.inject_file("path", "contents")

    def test_inject_files(self):
        instance = _get_fake_instance()
        agent = self._create_agent(instance)
        self.mox.StubOutWithMock(agent, "inject_file")

        files = [("path1", "content1"), ("path2", "content2")]
        agent.inject_file(*files[0])
        agent.inject_file(*files[1])

        self.mox.ReplayAll()

        agent.inject_files(files)

    def test_inject_files_skipped_when_cloud_init_installed(self):
        instance = _get_fake_instance(
            image_xenapi_skip_agent_inject_files_at_boot="True")
        agent = self._create_agent(instance)
        # No expectations recorded: inject_file must NOT be called.
        self.mox.StubOutWithMock(agent, "inject_file")

        files = [("path1", "content1"), ("path2", "content2")]

        self.mox.ReplayAll()

        agent.inject_files(files)
+
+
class FakeRebootException(Exception):
    """Mimics a XenAPI failure whose details signal an agent reboot."""
    # details[3] contains 'REBOOT:', the marker the retry logic looks for.
    details = ["", "", "", "asdf REBOOT: asdf"]
+
+
class RebootRetryTestCase(AgentTestCaseBase):
    """Tests for the agent's retry-after-reboot handling."""

    @mock.patch.object(agent, '_wait_for_new_dom_id')
    def test_retry_on_reboot(self, mock_wait):
        mock_session = mock.Mock()

        def fake_call_plugin(*args, **kwargs):
            # First call simulates the VM rebooting mid-request; the
            # retried call then succeeds.
            if fake_call_plugin.called:
                return {"returncode": '0', "message": "done"}
            else:
                fake_call_plugin.called = True
                raise FakeRebootException()

        fake_call_plugin.called = False
        mock_session.XenAPI.Failure = FakeRebootException
        mock_session.VM.get_domid.return_value = "fake_dom_id"
        mock_session.call_plugin.side_effect = fake_call_plugin

        agent = self._create_agent(None, mock_session)

        result = agent._call_agent("asdf")
        self.assertEqual("done", result)
        self.assertTrue(mock_session.VM.get_domid.called)
        self.assertEqual(2, mock_session.call_plugin.call_count)
        # Fix: was `mock_wait.called_once_with(...)`, which is not an
        # assertion at all -- Mock auto-creates the attribute and the
        # check silently always "passed".
        mock_wait.assert_called_once_with(mock_session, self.vm_ref,
                                          "fake_dom_id", "asdf")

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(time, 'time')
    def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep):
        mock_session = mock.Mock()
        # dom id already differs from "old": no sleeping required.
        mock_session.VM.get_domid.return_value = "new"

        agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method")

        mock_session.VM.get_domid.assert_called_once_with("vm_ref")
        self.assertFalse(mock_sleep.called)

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(time, 'time')
    def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep):
        self.flags(agent_timeout=3, group="xenserver")
        mock_time.return_value = 0
        mock_session = mock.Mock()
        old = 40
        new = 42
        # -1 means "no domain yet"; keep polling until a new id appears.
        mock_session.VM.get_domid.side_effect = [old, -1, new]

        agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method")

        mock_session.VM.get_domid.assert_called_with("vm_ref")
        self.assertEqual(3, mock_session.VM.get_domid.call_count)
        self.assertEqual(2, mock_sleep.call_count)

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(time, 'time')
    def test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep):
        self.flags(agent_timeout=3, group="xenserver")

        def fake_time():
            # Advance the fake clock one second per call so the
            # agent_timeout deadline is eventually crossed.
            fake_time.time = fake_time.time + 1
            return fake_time.time

        fake_time.time = 0
        mock_time.side_effect = fake_time
        mock_session = mock.Mock()
        mock_session.VM.get_domid.return_value = "old"

        self.assertRaises(exception.AgentTimeout,
                          agent._wait_for_new_dom_id,
                          mock_session, "vm_ref", "old", "method")

        self.assertEqual(4, mock_session.VM.get_domid.call_count)
+
+
class SetAdminPasswordTestCase(AgentTestCaseBase):
    """Tests for the Diffie-Hellman key exchange and password setting."""

    @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
    @mock.patch("nova.virt.xenapi.agent.SimpleDH")
    def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent):
        agent = self._create_agent(None)
        instance_mock = mock_simple_dh()
        instance_mock.get_public.return_value = 4321
        mock_call_agent.return_value = "1234"

        result = agent._exchange_key_with_agent()

        # key_init must send our public key (stringified) and accept the
        # agent's 'D0' success code without swallowing errors.
        mock_call_agent.assert_called_once_with('key_init', {"pub": "4321"},
                                                success_codes=['D0'],
                                                ignore_errors=False)
        # The agent's reply is fed back in as an int shared-secret input.
        result.compute_shared.assert_called_once_with(1234)

    @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
    @mock.patch.object(agent.XenAPIBasedAgent,
                       '_save_instance_password_if_sshkey_present')
    @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
    def test_set_admin_password_works(self, mock_exchange, mock_save,
                                      mock_call_agent):
        mock_dh = mock.Mock(spec_set=agent.SimpleDH)
        mock_dh.encrypt.return_value = "enc_pass"
        mock_exchange.return_value = mock_dh
        agent_inst = self._create_agent(None)

        agent_inst.set_admin_password("new_pass")

        # Note the trailing newline on the plaintext password.
        mock_dh.encrypt.assert_called_once_with("new_pass\n")
        mock_call_agent.assert_called_once_with('password',
                                                {'enc_pass': 'enc_pass'})
        mock_save.assert_called_once_with("new_pass")

    @mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault')
    @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
    def test_set_admin_password_silently_fails(self, mock_exchange,
                                               mock_add_fault):
        # Agent errors are recorded as instance faults, not raised.
        error = exception.AgentTimeout(method="fake")
        mock_exchange.side_effect = error
        agent_inst = self._create_agent(None)

        agent_inst.set_admin_password("new_pass")

        mock_add_fault.assert_called_once_with(error, mock.ANY)
+
+
class UpgradeRequiredTestCase(test.NoDBTestCase):
    """Tests for agent.is_upgrade_required() version comparison."""

    def test_less_than(self):
        self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5'))

    def test_greater_than(self):
        self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4'))

    def test_equal(self):
        self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4'))

    def test_non_lexical(self):
        # 10 > 4 numerically even though '10' < '4' as strings; the
        # comparison must be component-wise numeric.
        self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4'))

    def test_length(self):
        # A shorter version is considered older than a longer one.
        self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4'))
+
+
@mock.patch.object(uuid, "uuid4")
class CallAgentTestCase(AgentTestCaseBase):
    """Tests for the module-level agent._call_agent() helper.

    The class decorator patches uuid.uuid4, so every test method
    receives mock_uuid as its last argument.
    """

    def test_call_agent_success(self, mock_uuid):
        session = mock.Mock()
        instance = {"uuid": "fake"}
        addl_args = {"foo": "bar"}

        session.VM.get_domid.return_value = '42'
        mock_uuid.return_value = 1
        session.call_plugin.return_value = {'returncode': '4',
                                            'message': "asdf\\r\\n"}

        # '\\r\\n' is stripped from the message; '4' is accepted because
        # it is listed in success_codes.
        self.assertEqual("asdf",
                         agent._call_agent(session, instance, "vm_ref",
                                           "method", addl_args, timeout=300,
                                           success_codes=['0', '4']))

        expected_args = {
            'id': '1',
            'dom_id': '42',
            'timeout': '300',
        }
        expected_args.update(addl_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")
        session.call_plugin.assert_called_once_with("agent", "method",
                                                    expected_args)

    def _call_agent_setup(self, session, mock_uuid,
                          returncode='0', success_codes=None,
                          exception=None):
        # Shared driver: either make call_plugin raise, or return a
        # canned response with the given returncode.
        session.XenAPI.Failure = xenapi_fake.Failure
        instance = {"uuid": "fake"}

        session.VM.get_domid.return_value = 42
        mock_uuid.return_value = 1
        if exception:
            session.call_plugin.side_effect = exception
        else:
            session.call_plugin.return_value = {'returncode': returncode,
                                                'message': "asdf\\r\\n"}

        return agent._call_agent(session, instance, "vm_ref", "method",
                                 success_codes=success_codes)

    def _assert_agent_called(self, session, mock_uuid):
        # '30' is the default timeout when none is passed.
        expected_args = {
            'id': '1',
            'dom_id': '42',
            'timeout': '30',
        }
        session.call_plugin.assert_called_once_with("agent", "method",
                                                    expected_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")

    def test_call_agent_works_with_defaults(self, mock_uuid):
        session = mock.Mock()
        self._call_agent_setup(session, mock_uuid)
        self._assert_agent_called(session, mock_uuid)

    def test_call_agent_fails_with_timeout(self, mock_uuid):
        session = mock.Mock()
        self.assertRaises(exception.AgentTimeout, self._call_agent_setup,
                          session, mock_uuid,
                          exception=xenapi_fake.Failure(["TIMEOUT:fake"]))
        self._assert_agent_called(session, mock_uuid)

    def test_call_agent_fails_with_not_implemented(self, mock_uuid):
        session = mock.Mock()
        self.assertRaises(exception.AgentNotImplemented,
                          self._call_agent_setup,
                          session, mock_uuid,
                          exception=xenapi_fake.Failure(["NOT IMPLEMENTED:"]))
        self._assert_agent_called(session, mock_uuid)

    def test_call_agent_fails_with_other_error(self, mock_uuid):
        session = mock.Mock()
        self.assertRaises(exception.AgentError, self._call_agent_setup,
                          session, mock_uuid,
                          exception=xenapi_fake.Failure(["asdf"]))
        self._assert_agent_called(session, mock_uuid)

    def test_call_agent_fails_with_returned_error(self, mock_uuid):
        session = mock.Mock()
        self.assertRaises(exception.AgentError, self._call_agent_setup,
                          session, mock_uuid, returncode='42')
        self._assert_agent_called(session, mock_uuid)
+
+
class XenAPIBasedAgentTestCase(AgentTestCaseBase):
    """Tests for XenAPIBasedAgent._call_agent error handling.

    Fix: this class was previously named ``XenAPIBasedAgent``, shadowing
    the production class it tests and breaking the *TestCase naming
    convention. Test discovery is by base class, so the rename is safe.
    """

    @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
    @mock.patch.object(agent, "_call_agent")
    def test_call_agent_swallows_error(self, mock_call_agent,
                                       mock_add_instance_fault):
        fake_error = exception.AgentError(method="bob")
        mock_call_agent.side_effect = fake_error

        instance = _get_fake_instance()
        # Local renamed from `agent` to avoid shadowing the module.
        agent_inst = self._create_agent(instance)

        agent_inst._call_agent("bob")

        mock_call_agent.assert_called_once_with(
            agent_inst.session, agent_inst.instance,
            agent_inst.vm_ref, "bob", None, None, None)
        mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY)

    @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
    @mock.patch.object(agent, "_call_agent")
    def test_call_agent_throws_error(self, mock_call_agent,
                                     mock_add_instance_fault):
        fake_error = exception.AgentError(method="bob")
        mock_call_agent.side_effect = fake_error

        instance = _get_fake_instance()
        agent_inst = self._create_agent(instance)

        # With ignore_errors=False the error propagates and no instance
        # fault is recorded.
        self.assertRaises(exception.AgentError, agent_inst._call_agent,
                          "bob", ignore_errors=False)

        mock_call_agent.assert_called_once_with(
            agent_inst.session, agent_inst.instance,
            agent_inst.vm_ref, "bob", None, None, None)
        self.assertFalse(mock_add_instance_fault.called)
diff --git a/nova/tests/unit/virt/xenapi/test_driver.py b/nova/tests/unit/virt/xenapi/test_driver.py
new file mode 100644
index 0000000000..eb3e02f29e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_driver.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2013 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import math
+
+import mock
+from oslo.utils import units
+
+from nova.compute import arch
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import xenapi
+from nova.virt.xenapi import driver as xenapi_driver
+
+
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for Driver operations."""

    def _get_driver(self):
        # Build a driver backed entirely by the fake session.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.flags(connection_url='test_url',
                   connection_password='test_pass', group='xenserver')
        return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)

    def host_stats(self, refresh=True):
        # Canned host stats used as a get_host_stats replacement below.
        return {'host_memory_total': 3 * units.Mi,
                'host_memory_free_computed': 2 * units.Mi,
                'disk_total': 5 * units.Gi,
                'disk_used': 2 * units.Gi,
                'disk_allocated': 4 * units.Gi,
                'host_hostname': 'somename',
                'supported_instances': arch.X86_64,
                'host_cpu_info': {'cpu_count': 50},
                'vcpus_used': 10,
                'pci_passthrough_devices': ''}

    def test_available_resource(self):
        driver = self._get_driver()
        driver._session.product_version = (6, 8, 2)

        self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)

        resources = driver.get_available_resource(None)
        # (6, 8, 2) is flattened into a single integer version number.
        self.assertEqual(6008002, resources['hypervisor_version'])
        self.assertEqual(50, resources['vcpus'])
        # Byte counts from host_stats are reported in MB / GB here.
        self.assertEqual(3, resources['memory_mb'])
        self.assertEqual(5, resources['local_gb'])
        self.assertEqual(10, resources['vcpus_used'])
        self.assertEqual(3 - 2, resources['memory_mb_used'])
        self.assertEqual(2, resources['local_gb_used'])
        self.assertEqual('xen', resources['hypervisor_type'])
        self.assertEqual('somename', resources['hypervisor_hostname'])
        # disk_total - disk_allocated = 1 GB available at minimum.
        self.assertEqual(1, resources['disk_available_least'])

    def test_overhead(self):
        driver = self._get_driver()
        instance = {'memory_mb': 30720, 'vcpus': 4}

        # expected memory overhead per:
        # https://wiki.openstack.org/wiki/XenServer/Overhead
        expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
                    (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
                    xenapi_driver.OVERHEAD_BASE)
        expected = math.ceil(expected)
        overhead = driver.estimate_instance_overhead(instance)
        self.assertEqual(expected, overhead['memory_mb'])

    def test_set_bootable(self):
        driver = self._get_driver()

        self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
        driver._vmops.set_bootable('inst', True)
        self.mox.ReplayAll()

        driver.set_bootable('inst', True)

    def test_post_interrupted_snapshot_cleanup(self):
        # The driver should delegate straight to vmops.
        driver = self._get_driver()
        fake_vmops_cleanup = mock.Mock()
        driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup

        driver.post_interrupted_snapshot_cleanup("context", "instance")

        fake_vmops_cleanup.assert_called_once_with("context", "instance")

    def test_public_api_signatures(self):
        inst = self._get_driver()
        self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
diff --git a/nova/tests/unit/virt/xenapi/test_network_utils.py b/nova/tests/unit/virt/xenapi/test_network_utils.py
new file mode 100644
index 0000000000..5aa660f2a7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_network_utils.py
@@ -0,0 +1,76 @@
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import network_utils
+
+
+class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_network_with_name_label_works(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net"]
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertEqual("net", result)
+ session.network.get_by_name_label.assert_called_once_with("label")
+
+ def test_find_network_with_name_returns_none(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = []
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertIsNone(result)
+
+ def test_find_network_with_name_label_raises(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net", "net2"]
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_name_label,
+ session, "label")
+
+ def test_find_network_with_bridge_works(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {"net": "asdf"}
+
+ result = network_utils.find_network_with_bridge(session, "bridge")
+
+ self.assertEqual(result, "net")
+ expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"'
+ session.network.get_all_records_where.assert_called_once_with(expr)
+
+ def test_find_network_with_bridge_raises_too_many(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {
+ "net": "asdf",
+ "net2": "asdf2"
+ }
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
+
+ def test_find_network_with_bridge_raises_no_networks(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {}
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
diff --git a/nova/tests/unit/virt/xenapi/test_vm_utils.py b/nova/tests/unit/virt/xenapi/test_vm_utils.py
new file mode 100644
index 0000000000..ac54bd1480
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,2422 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import uuid
+
+from eventlet import greenthread
+import fixtures
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import vm_mode
+from nova import context
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common.fixture import config as config_fixture
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.tests.unit.virt.xenapi import test_xenapi
+from nova import utils
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+
+CONF = cfg.CONF
+XENSM_TYPE = 'xensm'
+ISCSI_TYPE = 'iscsi'
+
+
+def get_fake_connection_data(sr_type):
+ fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
+ 'name_label': 'fake_storage',
+ 'name_description': 'test purposes',
+ 'server': 'myserver',
+ 'serverpath': '/local/scratch/myname',
+ 'sr_type': 'nfs',
+ 'introduce_sr_keys': ['server',
+ 'serverpath',
+ 'sr_type'],
+ 'vdi_uuid': 'falseVDI'},
+ ISCSI_TYPE: {'volume_id': 'fake_volume_id',
+ 'target_lun': 1,
+ 'target_iqn': 'fake_iqn:volume-fake_volume_id',
+ 'target_portal': u'localhost:3260',
+ 'target_discovered': False}, }
+ return fakes[sr_type]
+
+
+def _get_fake_session(error=None):
+ session = mock.Mock()
+ xenapi_session.apply_session_helpers(session)
+
+ if error is not None:
+ class FakeException(Exception):
+ details = [error, "a", "b", "c"]
+
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+
+ return session
+
+
+@contextlib.contextmanager
+def contextified(result):
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
+ pass
+
+
+class LookupTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(LookupTestCase, self).setUp()
+ self.session = self.mox.CreateMockAnything('Fake Session')
+ self.name_label = 'my_vm'
+
+ def _do_mock(self, result):
+ self.session.call_xenapi(
+ "VM.get_by_name_label", self.name_label).AndReturn(result)
+ self.mox.ReplayAll()
+
+ def test_normal(self):
+ self._do_mock(['x'])
+ result = vm_utils.lookup(self.session, self.name_label)
+ self.assertEqual('x', result)
+
+ def test_no_result(self):
+ self._do_mock([])
+ result = vm_utils.lookup(self.session, self.name_label)
+ self.assertIsNone(result)
+
+ def test_too_many(self):
+ self._do_mock(['a', 'b'])
+ self.assertRaises(exception.InstanceExists,
+ vm_utils.lookup,
+ self.session, self.name_label)
+
+ def test_rescue_none(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
+ self._do_mock(['x'])
+ result = vm_utils.lookup(self.session, self.name_label,
+ check_rescue=True)
+ self.assertEqual('x', result)
+
+ def test_rescue_found(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label",
+ self.name_label + '-rescue').AndReturn(['y'])
+ self.mox.ReplayAll()
+ result = vm_utils.lookup(self.session, self.name_label,
+ check_rescue=True)
+ self.assertEqual('y', result)
+
+ def test_rescue_too_many(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label",
+ self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InstanceExists,
+ vm_utils.lookup,
+ self.session, self.name_label,
+ check_rescue=True)
+
+
+class GenerateConfigDriveTestCase(VMUtilsTestBase):
+ def test_no_admin_pass(self):
+ instance = {}
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * units.Mi).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(_self, instance, content=None, extra_md=None,
+ network_info=None):
+ self.assertEqual(network_info, "nw_info")
+
+ def metadata_for_config_drive(_self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice', "nw_info")
+
+ @mock.patch.object(vm_utils, "destroy_vdi")
+ @mock.patch.object(vm_utils, "vdi_attached_here")
+ @mock.patch.object(vm_utils, "create_vdi")
+ @mock.patch.object(vm_utils, "safe_find_sr")
+ def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
+ mock_destroy):
+ mock_create_vdi.return_value = 'vdi_ref'
+ mock_attached.side_effect = test.TestingException
+ mock_destroy.side_effect = exception.StorageError(reason="")
+
+ instance = {"uuid": "asdf"}
+ self.assertRaises(test.TestingException,
+ vm_utils.generate_configdrive,
+ 'session', instance, 'vm_ref', 'userdevice',
+ 'nw_info')
+ mock_destroy.assert_called_once_with('session', 'vdi_ref')
+
+
+class XenAPIGetUUID(VMUtilsTestBase):
+ def test_get_this_vm_uuid_new_kernel(self):
+ self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
+
+ vm_utils._get_sys_hypervisor_uuid().AndReturn(
+ '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
+
+ self.mox.ReplayAll()
+ self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
+ vm_utils.get_this_vm_uuid(None))
+ self.mox.VerifyAll()
+
+ def test_get_this_vm_uuid_old_kernel_reboot(self):
+ self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ vm_utils._get_sys_hypervisor_uuid().AndRaise(
+ IOError(13, 'Permission denied'))
+ utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
+ ('27', ''))
+ utils.execute('xenstore-read', '/local/domain/27/vm',
+ run_as_root=True).AndReturn(
+ ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
+
+ self.mox.ReplayAll()
+ self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
+ vm_utils.get_this_vm_uuid(None))
+ self.mox.VerifyAll()
+
+
+class FakeSession(object):
+ def call_xenapi(self, *args):
+ pass
+
+ def call_plugin(self, *args):
+ pass
+
+ def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
+ pass
+
+ def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
+ callback, *args, **kwargs):
+ pass
+
+
+class FetchVhdImageTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(FetchVhdImageTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.context.auth_token = 'auth_token'
+ self.session = FakeSession()
+ self.instance = {"uuid": "uuid"}
+
+ self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
+ vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
+
+ self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
+ vm_utils.get_sr_path(self.session).AndReturn('sr_path')
+
+ def _stub_glance_download_vhd(self, raise_exc=None):
+ self.mox.StubOutWithMock(
+ self.session, 'call_plugin_serialized_with_retry')
+ func = self.session.call_plugin_serialized_with_retry(
+ 'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
+ extra_headers={'X-Service-Catalog': '[]',
+ 'X-Auth-Token': 'auth_token',
+ 'X-Roles': '',
+ 'X-Tenant-Id': None,
+ 'X-User-Id': None,
+ 'X-Identity-Status': 'Confirmed'},
+ image_id='image_id',
+ uuid_stack=["uuid_stack"],
+ sr_path='sr_path')
+
+ if raise_exc:
+ func.AndRaise(raise_exc)
+ else:
+ func.AndReturn({'root': {'uuid': 'vdi'}})
+
+ def _stub_bittorrent_download_vhd(self, raise_exc=None):
+ self.mox.StubOutWithMock(
+ self.session, 'call_plugin_serialized')
+ func = self.session.call_plugin_serialized(
+ 'bittorrent', 'download_vhd',
+ image_id='image_id',
+ uuid_stack=["uuid_stack"],
+ sr_path='sr_path',
+ torrent_download_stall_cutoff=600,
+ torrent_listen_port_start=6881,
+ torrent_listen_port_end=6891,
+ torrent_max_last_accessed=86400,
+ torrent_max_seeder_processes_per_host=1,
+ torrent_seed_chance=1.0,
+ torrent_seed_duration=3600,
+ torrent_url='http://foo/image_id.torrent'
+ )
+ if raise_exc:
+ func.AndRaise(raise_exc)
+ else:
+ func.AndReturn({'root': {'uuid': 'vdi'}})
+
+ def test_fetch_vhd_image_works_with_glance(self):
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(
+ self.context, self.session, self.instance, "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_fetch_vhd_image_works_with_bittorrent(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(True)
+
+ self._stub_bittorrent_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi").AndRaise(exception.FlavorDiskTooSmall)
+
+ self.mox.StubOutWithMock(self.session, 'call_xenapi')
+ self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
+
+ self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
+ vm_utils.destroy_vdi(self.session,
+ "ref").AndRaise(exception.StorageError(reason=""))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ vm_utils._fetch_vhd_image, self.context, self.session,
+ self.instance, 'image_id')
+
+ self.mox.VerifyAll()
+
+ def test_fallback_to_default_handler(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(True)
+
+ self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
+
+ vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
+ vm_utils.get_sr_path(self.session).AndReturn('sr_path')
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_default_handler_does_not_fallback_to_itself(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd(raise_exc=RuntimeError)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
+ self.context, self.session, self.instance, 'image_id')
+
+ self.mox.VerifyAll()
+
+
+class TestImageCompression(VMUtilsTestBase):
+ def test_image_compression(self):
+ # Testing for nova.conf, too low, negative, and a correct value.
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=0, group='xenserver')
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=-6, group='xenserver')
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=6, group='xenserver')
+ self.assertEqual(vm_utils.get_compression_level(), 6)
+
+
+class ResizeHelpersTestCase(VMUtilsTestBase):
+ def test_repair_filesystem(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ utils.execute('e2fsck', '-f', "-y", "fakepath",
+ run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
+ ("size is: 42", ""))
+
+ self.mox.ReplayAll()
+
+ vm_utils._repair_filesystem("fakepath")
+
+ def _call_tune2fs_remove_journal(self, path):
+ utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
+
+ def _call_tune2fs_add_journal(self, path):
+ utils.execute("tune2fs", "-j", path, run_as_root=True)
+
+ def _call_parted_mkpart(self, path, start, end):
+ utils.execute('parted', '--script', path, 'rm', '1',
+ run_as_root=True)
+ utils.execute('parted', '--script', path, 'mkpart',
+ 'primary', '%ds' % start, '%ds' % end, run_as_root=True)
+
+ def _call_parted_boot_flag(sef, path):
+ utils.execute('parted', '--script', path, 'set', '1',
+ 'boot', 'on', run_as_root=True)
+
+ def test_resize_part_and_fs_down_succeeds(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
+ self._call_parted_mkpart(dev_path, 0, 9)
+ self._call_parted_boot_flag(dev_path)
+ self._call_tune2fs_add_journal(partition_path)
+
+ self.mox.ReplayAll()
+
+ vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
+
+ def test_log_progress_if_required(self):
+ self.mox.StubOutWithMock(vm_utils.LOG, "debug")
+ vm_utils.LOG.debug(_("Sparse copy in progress, "
+ "%(complete_pct).2f%% complete. "
+ "%(left)s bytes left to copy"),
+ {"complete_pct": 50.0, "left": 1})
+ current = timeutils.utcnow()
+ timeutils.set_time_override(current)
+ timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
+ self.mox.ReplayAll()
+ vm_utils._log_progress_if_required(1, current, 2)
+
+ def test_log_progress_if_not_required(self):
+ self.mox.StubOutWithMock(vm_utils.LOG, "debug")
+ current = timeutils.utcnow()
+ timeutils.set_time_override(current)
+ timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
+ self.mox.ReplayAll()
+ vm_utils._log_progress_if_required(1, current, 2)
+
+ def test_resize_part_and_fs_down_fails_disk_too_big(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ new_sectors = 10
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ mobj = utils.execute("resize2fs",
+ partition_path,
+ "%ss" % new_sectors,
+ run_as_root=True)
+ mobj.AndRaise(processutils.ProcessExecutionError)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ResizeError,
+ vm_utils._resize_part_and_fs,
+ "fake", 0, 20, 10, "boot")
+
+ def test_resize_part_and_fs_up_succeeds(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ self._call_parted_mkpart(dev_path, 0, 29)
+ utils.execute("resize2fs", partition_path, run_as_root=True)
+ self._call_tune2fs_add_journal(partition_path)
+
+ self.mox.ReplayAll()
+
+ vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
+
+ def test_resize_disk_throws_on_zero_size(self):
+ self.assertRaises(exception.ResizeError,
+ vm_utils.resize_disk, "session", "instance", "vdi_ref",
+ {"root_gb": 0})
+
+ def test_auto_config_disk_returns_early_on_zero_size(self):
+ vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
+
+ @mock.patch.object(utils, "execute")
+ def test_get_partitions(self, mock_execute):
+ parted_return = "BYT;\n...\n"
+ parted_return += "1:2s:11s:10s:ext3::boot;\n"
+ parted_return += "2:20s:11s:10s::bob:;\n"
+ mock_execute.return_value = (parted_return, None)
+
+ partitions = vm_utils._get_partitions("abc")
+
+ self.assertEqual(2, len(partitions))
+ self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
+ self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
+
+
+class CheckVDISizeTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CheckVDISizeTestCase, self).setUp()
+ self.context = 'fakecontext'
+ self.session = 'fakesession'
+ self.instance = dict(uuid='fakeinstance')
+ self.vdi_uuid = 'fakeuuid'
+
+ def test_not_too_large(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=1))
+
+ self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
+ vm_utils._get_vdi_chain_size(self.session,
+ self.vdi_uuid).AndReturn(1073741824)
+
+ self.mox.ReplayAll()
+
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ self.vdi_uuid)
+
+ def test_too_large(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=1))
+
+ self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
+ vm_utils._get_vdi_chain_size(self.session,
+ self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ vm_utils._check_vdi_size, self.context, self.session,
+ self.instance, self.vdi_uuid)
+
+ def test_zero_root_gb_disables_check(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=0))
+
+ self.mox.ReplayAll()
+
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ self.vdi_uuid)
+
+
+class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GetInstanceForVdisForSrTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ def test_get_instance_vdis_for_sr(self):
+ vm_ref = fake.create_vm("foo", "Running")
+ sr_ref = fake.create_sr()
+
+ vdi_1 = fake.create_vdi('vdiname1', sr_ref)
+ vdi_2 = fake.create_vdi('vdiname2', sr_ref)
+
+ for vdi_ref in [vdi_1, vdi_2]:
+ fake.create_vbd(vm_ref, vdi_ref)
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.get_instance_vdis_for_sr(
+ driver._session, vm_ref, sr_ref))
+
+ self.assertEqual([vdi_1, vdi_2], result)
+
+ def test_get_instance_vdis_for_sr_no_vbd(self):
+ vm_ref = fake.create_vm("foo", "Running")
+ sr_ref = fake.create_sr()
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.get_instance_vdis_for_sr(
+ driver._session, vm_ref, sr_ref))
+
+ self.assertEqual([], result)
+
+
+class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
+
+ def test_lookup_call(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn('ignored')
+
+ mock.ReplayAll()
+ vm_utils.vm_ref_or_raise('session', 'somename')
+ mock.VerifyAll()
+
+ def test_return_value(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
+
+ mock.ReplayAll()
+ self.assertEqual(
+ 'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
+ mock.VerifyAll()
+
+
+class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
+
+ def test_exception_raised(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn(None)
+
+ mock.ReplayAll()
+ self.assertRaises(
+ exception.InstanceNotFound,
+ lambda: vm_utils.vm_ref_or_raise('session', 'somename')
+ )
+ mock.VerifyAll()
+
+ def test_exception_msg_contains_vm_name(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn(None)
+
+ mock.ReplayAll()
+ try:
+ vm_utils.vm_ref_or_raise('session', 'somename')
+ except exception.InstanceNotFound as e:
+ self.assertIn('somename', six.text_type(e))
+ mock.VerifyAll()
+
+
+@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
+class CreateCachedImageTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateCachedImageTestCase, self).setUp()
+ self.session = _get_fake_session()
+
+ @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
+ def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
+ self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
+ None, None, None, 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
+ def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
+ self.flags(use_cow_images=False)
+ self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
+ None, None, None, 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ def test_no_cow_no_ext(self, mock_safe_find_sr):
+ self.flags(use_cow_images=False)
+ self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
+ 'vdi_ref', None, None, None,
+ 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
+ @mock.patch.object(vm_utils, '_fetch_image',
+ return_value={'root': {'uuid': 'vdi_uuid',
+ 'file': None}})
+ def test_noncached(self, mock_fetch_image, mock_clone_vdi,
+ mock_safe_find_sr):
+ self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
+ None, None, None, None, None,
+ None, 'vdi_uuid']
+ self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+
+class BittorrentTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(BittorrentTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ def test_image_uses_bittorrent(self):
+ instance = {'system_metadata': {'image_bittorrent': True}}
+ self.flags(torrent_images='some', group='xenserver')
+ self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
+ instance))
+
+ def _test_create_image(self, cache_type):
+ instance = {'system_metadata': {'image_cache_in_nova': True}}
+ self.flags(cache_images=cache_type, group='xenserver')
+
+ was = {'called': None}
+
+ def fake_create_cached_image(*args):
+ was['called'] = 'some'
+ return (False, {})
+ self.stubs.Set(vm_utils, '_create_cached_image',
+ fake_create_cached_image)
+
+ def fake_fetch_image(*args):
+ was['called'] = 'none'
+ return {}
+ self.stubs.Set(vm_utils, '_fetch_image',
+ fake_fetch_image)
+
+ vm_utils.create_image(self.context, None, instance,
+ 'foo', 'bar', 'baz')
+
+ self.assertEqual(was['called'], cache_type)
+
+ def test_create_image_cached(self):
+ self._test_create_image('some')
+
+ def test_create_image_uncached(self):
+ self._test_create_image('none')
+
+
+class ShutdownTestCase(VMUtilsTestBase):
+
+ def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
+ self.mock = mox.Mox()
+ session = FakeSession()
+ instance = "instance"
+ vm_ref = "vm-ref"
+ self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
+ self.mock.StubOutWithMock(vm_utils, 'LOG')
+ self.assertTrue(vm_utils.hard_shutdown_vm(
+ session, instance, vm_ref))
+
+ def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
+ self.mock = mox.Mox()
+ session = FakeSession()
+ instance = "instance"
+ vm_ref = "vm-ref"
+ self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
+ self.mock.StubOutWithMock(vm_utils, 'LOG')
+ self.assertTrue(vm_utils.clean_shutdown_vm(
+ session, instance, vm_ref))
+
+
+class CreateVBDTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateVBDTestCase, self).setUp()
+ self.session = FakeSession()
+ self.mock = mox.Mox()
+ self.mock.StubOutWithMock(self.session, 'call_xenapi')
+ self.vbd_rec = self._generate_vbd_rec()
+
+ def _generate_vbd_rec(self):
+ vbd_rec = {}
+ vbd_rec['VM'] = 'vm_ref'
+ vbd_rec['VDI'] = 'vdi_ref'
+ vbd_rec['userdevice'] = '0'
+ vbd_rec['bootable'] = False
+ vbd_rec['mode'] = 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ return vbd_rec
+
+ def test_create_vbd_default_args(self):
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_create_vbd_osvol(self):
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
+ "osvol", "True")
+ self.mock.ReplayAll()
+ result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
+ osvol=True)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_create_vbd_extra_args(self):
+ self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
+ self.vbd_rec['type'] = 'a'
+ self.vbd_rec['mode'] = 'RO'
+ self.vbd_rec['bootable'] = True
+ self.vbd_rec['empty'] = True
+ self.vbd_rec['unpluggable'] = False
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
+ vbd_type="a", read_only=True, bootable=True,
+ empty=True, unpluggable=False)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
def test_attach_cd(self):
    """attach_cd creates an empty CD VBD and then inserts the VDI."""
    self.mock.StubOutWithMock(vm_utils, 'create_vbd')

    # A CD drive is created empty/read-only; the media is inserted after.
    vm_utils.create_vbd(self.session, "vm_ref", None, 1,
                        vbd_type='cd', read_only=True, bootable=True,
                        empty=True, unpluggable=False).AndReturn("vbd_ref")
    self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
    self.mock.ReplayAll()

    result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
    self.assertEqual(result, "vbd_ref")
    self.mock.VerifyAll()
+
+
class UnplugVbdTestCase(VMUtilsTestBase):
    """Tests for vm_utils.unplug_vbd.

    unplug_vbd takes (session, vbd_ref, vm_ref).  Two of the original
    tests passed the refs swapped, which only went unnoticed because the
    mocked call_xenapi raises regardless of its arguments; all calls now
    use the correct argument order.
    """

    @mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_works(self, mock_sleep):
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)

        session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
        # Success on the first attempt: no retry sleeps expected.
        self.assertEqual(0, mock_sleep.call_count)

    def test_unplug_vbd_raises_unexpected_error(self):
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'
        session.call_xenapi.side_effect = test.TestingException()

        # Non-XenAPI errors must propagate immediately, without retries.
        self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_detached_works(self):
        # DEVICE_ALREADY_DETACHED is treated as success, not an error.
        error = "DEVICE_ALREADY_DETACHED"
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
        # An unrecognised XenAPI failure is wrapped in StorageError.
        session = _get_fake_session("")
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def _test_unplug_vbd_retries(self, mock_sleep, error):
        """Assert a retryable error is attempted 11 times (10 sleeps)."""
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)

        self.assertEqual(11, session.call_xenapi.call_count)
        self.assertEqual(10, mock_sleep.call_count)

    @mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_rejected(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep,
                                      "DEVICE_DETACH_REJECTED")

    @mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_internal_error(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep,
                                      "INTERNAL_ERROR")
+
+
class VDIOtherConfigTestCase(VMUtilsTestBase):
    """Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
    """

    def setUp(self):
        super(VDIOtherConfigTestCase, self).setUp()

        class _FakeSession():
            # Minimal session stub: dispatches "A.B" XenAPI operations to
            # a method named "A_B" when one exists on the instance,
            # otherwise records the last call for later inspection.
            def call_xenapi(self, operation, *args, **kwargs):
                # VDI.add_to_other_config -> VDI_add_to_other_config
                method = getattr(self, operation.replace('.', '_'), None)
                if method:
                    return method(*args, **kwargs)

                self.operation = operation
                self.args = args
                self.kwargs = kwargs

        self.session = _FakeSession()
        self.context = context.get_admin_context()
        self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
                              'name': 'myinstance'}

    def test_create_vdi(self):
        # Some images are registered with XenServer explicitly by calling
        # `create_vdi`
        vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
                            'myvdi', 'root', 1024, read_only=True)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        # The VDI.create record (first positional arg) must carry the
        # nova metadata in other_config.
        self.assertEqual(expected, self.session.args[0]['other_config'])

    def test_create_image(self):
        # Other images are registered implicitly when they are dropped into
        # the SR by a dom0 plugin or some other process
        self.flags(cache_images='none', group='xenserver')

        def fake_fetch_image(*args):
            return {'root': {'uuid': 'fake-uuid'}}

        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}

        vm_utils.create_image(self.context, self.session, self.fake_instance,
                              'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)

    def test_import_migrated_vhds(self):
        # Migrated images should preserve the `other_config`
        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        def call_plugin_serialized(*args, **kwargs):
            return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}
        self.session.call_plugin_serialized = call_plugin_serialized

        self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
        self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)

        vm_utils._import_migrated_vhds(self.session, self.fake_instance,
                                       "disk_label", "root", "vdi_label")

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)
+
+
class GenerateDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._generate_disk.

    Partition-table and filesystem commands are expected through mox
    stubs of utils.execute/trycmd; the VDI itself is created against the
    in-memory fake XenAPI session so its record can be inspected.
    """

    def setUp(self):
        super(GenerateDiskTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        # Tests flip this to True to exercise the dom0-local code path.
        self.session.is_local_connection = False
        self.vm_ref = fake.create_vm("foo", "Running")

    def tearDown(self):
        super(GenerateDiskTestCase, self).tearDown()
        fake.destroy_vm(self.vm_ref)

    def _expect_parted_calls(self):
        """Record the partitioning commands _generate_disk should run."""
        self.mox.StubOutWithMock(utils, "execute")
        self.mox.StubOutWithMock(utils, "trycmd")
        self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
        self.mox.StubOutWithMock(vm_utils.os.path, "exists")
        if self.session.is_local_connection:
            # Local connection: parted exit codes are ignored and kpartx
            # maps the new partition under /dev/mapper.
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=False, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=False, run_as_root=True)
            vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
            utils.trycmd('kpartx', '-a', '/dev/fakedev',
                         discard_warnings=True, run_as_root=True)
        else:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=True, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=True, run_as_root=True)

    def _check_vdi(self, vdi_ref, check_attached=True):
        """Verify the generated VDI's size and (optionally) its VBD link."""
        vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
        self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
        if check_attached:
            vbd_ref = vdi_rec["VBDs"][0]
            vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
            self.assertEqual(self.vm_ref, vbd_rec['VM'])
        else:
            self.assertEqual(0, len(vdi_rec["VBDs"]))

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_with_no_fs_given(self):
        # fs_type=None: partition only, no mkfs/mkswap expected.
        self._expect_parted_calls()

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name", "user",
                                          10, None)
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_swap(self):
        self._expect_parted_calls()
        utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name", "swap",
                                          10, "linux-swap")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral(self):
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name",
                                          "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ensure_cleanup_called(self):
        # If mkfs fails, the half-made VDI must still be destroyed; the
        # destroy failure itself must not mask the original exception.
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True).AndRaise(test.TestingException)
        vm_utils.destroy_vdi(
            self.session,
            mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(test.TestingException, vm_utils._generate_disk,
                          self.session, {"uuid": "fake_uuid"},
                          self.vm_ref, "2", "name", "ephemeral", 10, "ext4")

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral_local_not_attached(self):
        # Local connection with no vm_ref: disk is built via /dev/mapper
        # and left unattached.
        self.session.is_local_connection = True
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          None, "2", "name", "ephemeral",
                                          10, "ext4")
        self._check_vdi(vdi_ref, check_attached=False)
+
+
class GenerateEphemeralTestCase(VMUtilsTestBase):
    """Tests for get_ephemeral_disk_sizes and generate_ephemeral.

    Large ephemeral requests are split into chunks (max 2000 GB, or
    1024 GB for power-of-two totals); each chunk becomes one disk on a
    consecutive userdevice.
    """

    def setUp(self):
        super(GenerateEphemeralTestCase, self).setUp()
        self.session = "session"
        self.instance = "instance"
        self.vm_ref = "vm_ref"
        self.name_label = "name"
        self.ephemeral_name_label = "name ephemeral"
        self.userdevice = 4
        self.mox.StubOutWithMock(vm_utils, "_generate_disk")
        self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")

    def test_get_ephemeral_disk_sizes_simple(self):
        result = vm_utils.get_ephemeral_disk_sizes(20)
        expected = [20]
        self.assertEqual(expected, list(result))

    def test_get_ephemeral_disk_sizes_three_disks_2000(self):
        result = vm_utils.get_ephemeral_disk_sizes(4030)
        expected = [2000, 2000, 30]
        self.assertEqual(expected, list(result))

    def test_get_ephemeral_disk_sizes_two_disks_1024(self):
        result = vm_utils.get_ephemeral_disk_sizes(2048)
        expected = [1024, 1024]
        self.assertEqual(expected, list(result))

    def _expect_generate_disk(self, size, device, name_label):
        # Size is passed to _generate_disk in MB (GB * 1024); the stub
        # returns the device number as a fake vdi_ref.
        vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
                                str(device), name_label, 'ephemeral',
                                size * 1024, None).AndReturn(device)

    def test_generate_ephemeral_adds_one_disk(self):
        self._expect_generate_disk(20, self.userdevice,
                                   self.ephemeral_name_label)
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
                                    str(self.userdevice), self.name_label, 20)

    def test_generate_ephemeral_adds_multiple_disks(self):
        # Disks after the first get a " (n)" suffix on the label.
        self._expect_generate_disk(2000, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(2000, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")
        self._expect_generate_disk(30, self.userdevice + 2,
                                   self.ephemeral_name_label + " (2)")
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
                                    str(self.userdevice), self.name_label,
                                    4030)

    def test_generate_ephemeral_cleans_up_on_error(self):
        # Third disk fails: the two already-created vdi_refs (4 and 5,
        # from the stub's AndReturn(device)) must be destroyed.
        self._expect_generate_disk(1024, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(1024, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")

        vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
                                str(self.userdevice + 2),
                                "name ephemeral (2)", 'ephemeral',
                                units.Mi, None).AndRaise(exception.NovaException)

        vm_utils.safe_destroy_vdis(self.session, [4, 5])

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
                          self.session, self.instance, self.vm_ref,
                          str(self.userdevice), self.name_label, 4096)
+
+
class FakeFile(object):
    """Stub file object that logs every operation performed on it.

    Tests assert against ``_file_operations``, a list of
    (bound-method, arg...) tuples in call order.
    """

    def __init__(self):
        self._file_operations = []

    def seek(self, offset):
        self._log(self.seek, offset)

    def _log(self, operation, *args):
        # Record the bound method plus its arguments for later inspection.
        self._file_operations.append((operation,) + args)
+
+
class StreamDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._stream_disk.

    The builtin ``open`` is mocked so the image_service callback can be
    handed a FakeFile; seek positions distinguish the AMI (partitioned)
    and non-AMI paths.
    """

    def setUp(self):
        # Python 2 builtins module; needed to stub the global open().
        import __builtin__
        super(StreamDiskTestCase, self).setUp()
        self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
        self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
        self.mox.StubOutWithMock(vm_utils, '_write_partition')

        # NOTE(matelakat): This might hide the fail reason, as test runners
        # are unhappy with a mocked out open.
        self.mox.StubOutWithMock(__builtin__, 'open')
        self.image_service_func = self.mox.CreateMockAnything()

    def test_non_ami(self):
        fake_file = FakeFile()

        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.KERNEL, None, 'dev')

        # Non-AMI images are written from the start of the device.
        self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)

    def test_ami_disk(self):
        fake_file = FakeFile()

        # AMI disks get an MBR written first, then the image content is
        # streamed after the MBR.
        vm_utils._write_partition("session", 100, 'dev')
        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.DISK, 100, 'dev')

        self.assertEqual(
            [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
            fake_file._file_operations)
+
+
class VMUtilsSRPath(VMUtilsTestBase):
    """Tests for vm_utils.get_sr_path."""

    def setUp(self):
        super(VMUtilsSRPath, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False

    def test_defined(self):
        # When the PBD's device_config carries an explicit path, use it.
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi(
            'PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
            {'pbd_ref': {'device_config': {'path': 'sr_path'}}})

        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")

    def test_default(self):
        # No explicit path: fall back to /var/run/sr-mount/<sr uuid>.
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi(
            'PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
            {'pbd_ref': {'device_config': {}}})
        self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
            {'uuid': 'sr_uuid', 'type': 'ext'})
        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session),
                         "/var/run/sr-mount/sr_uuid")
+
+
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_kernel_and_ramdisk.

    The dom0 'kernel' plugin is asked for a cached copy first; an empty
    plugin response means a cache miss, in which case the image is
    fetched via _fetch_disk_image.
    """

    def setUp(self):
        super(CreateKernelRamdiskTestCase, self).setUp()
        self.context = "context"
        self.session = FakeSession()
        self.instance = {"kernel_id": None, "ramdisk_id": None}
        self.name_label = "name"
        self.mox.StubOutWithMock(self.session, "call_plugin")
        self.mox.StubOutWithMock(uuid, "uuid4")
        self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")

    def test_create_kernel_and_ramdisk_no_create(self):
        # Neither kernel_id nor ramdisk_id set: nothing to do.
        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                                                    self.session,
                                                    self.instance,
                                                    self.name_label)
        self.assertEqual((None, None), result)

    def test_create_kernel_and_ramdisk_create_both_cached(self):
        kernel_id = "kernel"
        ramdisk_id = "ramdisk"
        self.instance["kernel_id"] = kernel_id
        self.instance["ramdisk_id"] = ramdisk_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("k")

        args_ramdisk = {}
        args_ramdisk['cached-image'] = ramdisk_id
        args_ramdisk['new-image-uuid'] = "fake_uuid2"
        uuid.uuid4().AndReturn("fake_uuid2")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_ramdisk).AndReturn("r")

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                                                    self.session,
                                                    self.instance,
                                                    self.name_label)
        self.assertEqual(("k", "r"), result)

    def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
        kernel_id = "kernel"
        self.instance["kernel_id"] = kernel_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        # Empty plugin result signals a cache miss.
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("")

        kernel = {"kernel": {"file": "k"}}
        vm_utils._fetch_disk_image(self.context, self.session, self.instance,
                                   self.name_label, kernel_id, 0).AndReturn(
            kernel)

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                                                    self.session,
                                                    self.instance,
                                                    self.name_label)
        self.assertEqual(("k", None), result)
+
+
class ScanSrTestCase(VMUtilsTestBase):
    """Tests for vm_utils._scan_sr / scan_default_sr.

    SR_BACKEND_FAILURE_40 is a retryable XenAPI failure: _scan_sr backs
    off exponentially (2, 4, 8 seconds) for up to 4 attempts.
    """

    @mock.patch.object(vm_utils, "_scan_sr")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
        mock_safe_find_sr.return_value = "sr_ref"

        self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))

        mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")

    def test_scan_sr_works(self):
        session = mock.Mock()
        vm_utils._scan_sr(session, "sr_ref")
        session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    def test_scan_sr_unknown_error_fails_once(self):
        # Non-XenAPI errors are not retried.
        session = mock.Mock()
        session.call_xenapi.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          vm_utils._scan_sr, session, "sr_ref")
        session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
        session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session.XenAPI.Failure = FakeException
        session.call_xenapi.side_effect = FakeException

        self.assertRaises(FakeException,
                          vm_utils._scan_sr, session, "sr_ref")

        session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(4, session.call_xenapi.call_count)
        mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
        session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session.XenAPI.Failure = FakeException

        def fake_call_xenapi(*args):
            # Fail the first call, succeed on the second.
            fake_call_xenapi.count += 1
            if fake_call_xenapi.count != 2:
                raise FakeException()

        fake_call_xenapi.count = 0
        session.call_xenapi.side_effect = fake_call_xenapi

        vm_utils._scan_sr(session, "sr_ref")

        session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(2, session.call_xenapi.call_count)
        mock_sleep.assert_called_once_with(2)
+
+
@mock.patch.object(flavors, 'extract_flavor',
                   return_value={
                       'memory_mb': 1024,
                       'vcpus': 1,
                       'vcpu_weight': 1.0,
                   })
class CreateVmTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_vm / destroy_vm.

    extract_flavor is patched class-wide to a fixed 1 GB / 1 vCPU
    flavor, so every test method receives ``mock_extract``.
    """

    def test_vss_provider(self, mock_extract):
        # Windows guests get the VSS-provider xenstore flag and the
        # configured vcpu_pin_set reflected in VCPUs_params['mask'].
        self.flags(vcpu_pin_set="2,3")
        session = _get_fake_session()
        instance = {
            "uuid": "uuid", "os_type": "windows"
        }

        vm_utils.create_vm(session, instance, "label",
                           "kernel", "ramdisk")

        # The full record expected on VM.create for the fake flavor above.
        vm_rec = {
            'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1.0'},
            'PV_args': '',
            'memory_static_min': '0',
            'ha_restart_priority': '',
            'HVM_boot_policy': 'BIOS order',
            'PV_bootloader': '', 'tags': [],
            'VCPUs_max': '1',
            'memory_static_max': '1073741824',
            'actions_after_shutdown': 'destroy',
            'memory_dynamic_max': '1073741824',
            'user_version': '0',
            'xenstore_data': {'vm-data/allowvssprovider': 'false'},
            'blocked_operations': {},
            'is_a_template': False,
            'name_description': '',
            'memory_dynamic_min': '1073741824',
            'actions_after_crash': 'destroy',
            'memory_target': '1073741824',
            'PV_ramdisk': '',
            'PV_bootloader_args': '',
            'PCI_bus': '',
            'other_config': {'nova_uuid': 'uuid'},
            'name_label': 'label',
            'actions_after_reboot': 'restart',
            'VCPUs_at_startup': '1',
            'HVM_boot_params': {'order': 'dc'},
            'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
                         'timeoffset': '0', 'viridian': 'true',
                         'acpi': 'true'},
            'PV_legacy_args': '',
            'PV_kernel': '',
            'affinity': '',
            'recommendations': '',
            'ha_always_run': False
        }
        session.call_xenapi.assert_called_once_with("VM.create", vm_rec)

    def test_invalid_cpu_mask_raises(self, mock_extract):
        # A non-numeric vcpu_pin_set is rejected before any XenAPI call.
        self.flags(vcpu_pin_set="asdf")
        session = mock.Mock()
        instance = {
            "uuid": "uuid",
        }
        self.assertRaises(exception.Invalid,
                          vm_utils.create_vm,
                          session, instance, "label",
                          "kernel", "ramdisk")

    def test_destroy_vm(self, mock_extract):
        session = mock.Mock()
        instance = {
            "uuid": "uuid",
        }

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")

    def test_destroy_vm_silently_fails(self, mock_extract):
        # XenAPI failures during destroy are logged, not raised.
        session = mock.Mock()
        exc = test.TestingException()
        session.XenAPI.Failure = test.TestingException
        session.VM.destroy.side_effect = exc
        instance = {
            "uuid": "uuid",
        }

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")
+
+
class DetermineVmModeTestCase(VMUtilsTestBase):
    """Checks vm_utils.determine_vm_mode: explicit vm_mode wins, then
    os_type, then the disk image type, defaulting to HVM.
    """

    def _assert_mode(self, expected, instance, disk_image_type=None):
        # Shared assertion helper so each test states only its inputs.
        self.assertEqual(expected,
                         vm_utils.determine_vm_mode(instance,
                                                    disk_image_type))

    def test_determine_vm_mode_returns_xen_mode(self):
        self._assert_mode(vm_mode.XEN, {"vm_mode": "xen"})

    def test_determine_vm_mode_returns_hvm_mode(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": "hvm"})

    def test_determine_vm_mode_returns_xen_for_linux(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": "linux"})

    def test_determine_vm_mode_returns_hvm_for_windows(self):
        self._assert_mode(vm_mode.HVM,
                          {"vm_mode": None, "os_type": "windows"})

    def test_determine_vm_mode_returns_hvm_by_default(self):
        self._assert_mode(vm_mode.HVM,
                          {"vm_mode": None, "os_type": None})

    def test_determine_vm_mode_returns_xen_for_VHD(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK_VHD)

    def test_determine_vm_mode_returns_xen_for_DISK(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK)
+
+
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
    """Tests for the thin call_xenapi wrappers in vm_utils and for
    update_vdi_virtual_size (which only ever grows a VDI).
    """

    def test_vm_get_vbd_refs(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
        session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")

    def test_vbd_get_rec(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
        session.call_xenapi.assert_called_once_with("VBD.get_record",
                                                    "vbd_ref")

    def test_vdi_get_rec(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_record",
                                                    "vdi_ref")

    def test_vdi_snapshot(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.snapshot",
                                                    "vdi_ref", {})

    def test_vdi_get_virtual_size(self):
        # XenAPI returns the size as a string; the wrapper converts to int.
        session = mock.Mock()
        session.call_xenapi.return_value = "123"
        self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
                                                    "ref")

    @mock.patch.object(vm_utils, '_get_resize_func_name')
    def test_vdi_resize(self, mock_get_resize_func_name):
        session = mock.Mock()
        mock_get_resize_func_name.return_value = "VDI.fake"
        vm_utils._vdi_resize(session, "ref", 123)
        # Size is stringified before being handed to XenAPI.
        session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
        # Current size just below the 1 GB target: resize up.
        mock_get_size.return_value = (1024 ** 3) - 1
        instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
                                                       mock_resize):
        # Already exactly the target size: no resize call.
        mock_get_size.return_value = 1024 ** 3
        instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
                                                       mock_resize):
        # Shrinking is never allowed: a larger current size raises.
        mock_get_size.return_value = 1024 ** 3 + 1
        instance = {"uuid": "a"}

        self.assertRaises(exception.ResizeError,
                          vm_utils.update_vdi_virtual_size,
                          "s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)
+
+
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
    """Tests for vm_utils.get_vdi_for_vm_safely."""

    def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
                                   vbd_get_rec, vdi_get_rec):
        session = "session"

        # Both VBDs report userdevice '0'; the first match wins, so only
        # VBD "a" should be inspected.
        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
        vdi_get_rec.return_value = {}

        result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
        self.assertEqual(('vdi_ref', {}), result)

        vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
        vbd_get_rec.assert_called_once_with(session, "a")
        vdi_get_rec.assert_called_once_with(session, "vdi_ref")

    def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
                                         vbd_get_rec, vdi_get_rec):
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}

        # No VBD has userdevice '1': all VBDs are checked, then it raises.
        self.assertRaises(exception.NovaException,
                          vm_utils.get_vdi_for_vm_safely,
                          session, "vm_ref", userdevice='1')

        self.assertEqual([], vdi_get_rec.call_args_list)
        self.assertEqual(2, len(vbd_get_rec.call_args_list))
+
+
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
    """Tests for vm_utils.get_all_vdi_uuids_for_vm."""

    def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
                                        vbd_get_rec, vdi_get_uuid):
        # Fakes: VBD ref doubles as its userdevice, and the VDI "uuid"
        # is just the VDI ref, so results are easy to predict.
        def fake_vbd_get_rec(session, vbd_ref):
            return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}

        def fake_vdi_get_uuid(session, vdi_ref):
            return vdi_ref

        vm_get_vbd_refs.return_value = ["0", "2"]
        vbd_get_rec.side_effect = fake_vbd_get_rec
        vdi_get_uuid.side_effect = fake_vdi_get_uuid

    def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
                                            vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
        expected = ['vdi_ref_0', 'vdi_ref_2']
        self.assertEqual(expected, list(result))

    def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
                                                 vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        # min_userdevice=1 filters out the VBD on userdevice 0.
        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
                                                   min_userdevice=1)
        expected = ["vdi_ref_2"]
        self.assertEqual(expected, list(result))
+
+
class GetAllVdisTestCase(VMUtilsTestBase):
    """Tests for vm_utils._get_all_vdis_in_sr."""

    def test_get_all_vdis_in_sr(self):
        # Only VDI "2" has a record; "1" resolves to None and must be
        # filtered out of the results.
        records = {"2": "vdi_rec_2"}

        session = mock.Mock()
        session.call_xenapi.return_value = ["1", "2"]
        session.get_rec.side_effect = (
            lambda record_type, ref: records.get(ref))

        sr_ref = "sr_ref"
        actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
        self.assertEqual(actual, [('2', 'vdi_rec_2')])

        session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
+
+
class VDIAttachedHere(VMUtilsTestBase):
    """Tests for the vdi_attached_here context manager."""

    @mock.patch.object(vm_utils, 'destroy_vbd')
    @mock.patch.object(vm_utils, '_get_this_vm_ref')
    @mock.patch.object(vm_utils, 'create_vbd')
    @mock.patch.object(vm_utils, '_remap_vbd_dev')
    @mock.patch.object(vm_utils, '_wait_for_device')
    @mock.patch.object(utils, 'execute')
    def test_sync_called(self, mock_execute, mock_wait_for_device,
                         mock_remap_vbd_dev, mock_create_vbd,
                         mock_get_this_vm_ref, mock_destroy_vbd):
        # Leaving the context must flush outstanding writes via sync.
        session = _get_fake_session()
        with vm_utils.vdi_attached_here(session, 'vdi_ref'):
            pass
        mock_execute.assert_called_with('sync', run_as_root=True)
+
+
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
    """Tests for snapshot_attached_here and _wait_for_vhd_coalesce."""

    @mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
    def test_snapshot_attached_here(self, mock_impl):
        # The public context manager should delegate to the private impl
        # with userdevice defaulting to '0' and no callback.
        def fake_impl(session, instance, vm_ref, label, userdevice,
                      post_snapshot_callback):
            self.assertEqual("session", session)
            self.assertEqual("instance", instance)
            self.assertEqual("vm_ref", vm_ref)
            self.assertEqual("label", label)
            self.assertEqual('0', userdevice)
            self.assertIsNone(post_snapshot_callback)
            yield "fake"

        mock_impl.side_effect = fake_impl

        with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
                                             "label") as result:
            self.assertEqual("fake", result)

        mock_impl.assert_called_once_with("session", "instance", "vm_ref",
                                          "label", '0', None)
+
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vdi_snapshot')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
                                     mock_vdi_snapshot, mock_vdi_get_uuid,
                                     mock_wait_for_vhd_coalesce,
                                     mock_walk_vdi_chain,
                                     mock_safe_destroy_vdis,
                                     mock_delete_snapshots_in_vdi_chain):
    """Full flow: snapshot, yield chain, then clean up even on error."""
    session = "session"
    instance = {"uuid": "uuid"}
    mock_callback = mock.Mock()

    mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
                                               {"SR": "sr_ref",
                                                "uuid": "vdi_uuid"})
    mock_vdi_snapshot.return_value = "snap_ref"
    mock_vdi_get_uuid.return_value = "snap_uuid"
    mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]

    try:
        with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
                                             "label", '2',
                                             mock_callback) as result:
            self.assertEqual(["a", "b"], result)
            # Raise inside the context to prove cleanup still happens.
            raise test.TestingException()
        # Guard: only reached if the context manager swallowed the
        # exception, which would be a bug.
        # NOTE(review): indentation was lost in extraction; this line is
        # assumed to sit at the `with` level, as in upstream nova —
        # confirm against the original file.
        self.assertTrue(False)
    except test.TestingException:
        pass

    mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
                                                       '2')
    mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
    mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
                                                       "sr_ref", "vdi_ref",
                                                       ['a', 'b'])
    mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
    mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
                                          mock.call(session, "snap_uuid")])
    mock_callback.assert_called_once_with(
        task_state="image_pending_upload")
    # The snapshot VDI is always destroyed on exit.
    mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
    mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
                                                               instance,
                                                               ['a', 'b'],
                                                               "sr_ref")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
+ instance = {"uuid": "fake"}
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid"])
+ self.assertFalse(mock_sleep.called)
+
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
+ mock_count):
+ mock_count.return_value = 2
+ instance = {"uuid": "fake"}
+
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertFalse(mock_sleep.called)
+ self.assertTrue(mock_count.called)
+
+ @mock.patch.object(greenthread, 'sleep')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(vm_utils, '_scan_sr')
+ def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
+ mock_count, mock_get_vhd_parent_uuid, mock_sleep):
+ mock_count.return_value = 1
+ instance = {"uuid": "fake"}
+
+ self.assertRaises(exception.NovaException,
+ vm_utils._wait_for_vhd_coalesce, "session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertTrue(mock_count.called)
+ self.assertEqual(20, mock_sleep.call_count)
+ self.assertEqual(20, mock_scan_sr.call_count)
+
+ @mock.patch.object(greenthread, 'sleep')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(vm_utils, '_scan_sr')
+ def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
+ mock_count, mock_get_vhd_parent_uuid, mock_sleep):
+ mock_count.return_value = 1
+ instance = {"uuid": "fake"}
+ mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
+
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertEqual(1, mock_sleep.call_count)
+ self.assertEqual(2, mock_scan_sr.call_count)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_count_children(self, mock_get_all_vdis_in_sr):
+ vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
+ ('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
+ ('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
+ mock_get_all_vdis_in_sr.return_value = vdis
+ self.assertEqual(2, vm_utils._count_children('session',
+ 'parent1', 'sr'))
+
+
class ImportMigratedDisksTestCase(VMUtilsTestBase):
    """Tests for importing disks that were transferred during a migration."""

    @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
    @mock.patch.object(vm_utils, '_import_migrated_root_disk')
    def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
        session = "session"
        instance = "instance"
        mock_root.return_value = "root_vdi"
        mock_ephemeral.return_value = ["a", "b"]

        result = vm_utils.import_all_migrated_disks(session, instance)

        # The result combines the root disk with the ephemeral disk list.
        expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
        self.assertEqual(expected, result)
        mock_root.assert_called_once_with(session, instance)
        mock_ephemeral.assert_called_once_with(session, instance)

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrated_root_disk(self, mock_migrate):
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name"}

        result = vm_utils._import_migrated_root_disk("s", instance)

        self.assertEqual("foo", result)
        # Root chain label is the instance uuid; disk type is "root".
        mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
                                             "name")

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrate_ephemeral_disks(self, mock_migrate):
        mock_migrate.return_value = "foo"
        # 4000GB of ephemeral space yields two imported disks, keyed '4'
        # and '5' (presumably userdevice numbers - confirm in vm_utils).
        instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}

        result = vm_utils._import_migrate_ephemeral_disks("s", instance)

        self.assertEqual({'4': 'foo', '5': 'foo'}, result)
        expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
                                    "ephemeral", "name ephemeral (1)"),
                          mock.call("s", instance, "uuid_ephemeral_2",
                                    "ephemeral", "name ephemeral (2)")]
        self.assertEqual(expected_calls, mock_migrate.call_args_list)

    @mock.patch.object(vm_utils, '_set_vdi_info')
    @mock.patch.object(vm_utils, 'scan_default_sr')
    @mock.patch.object(vm_utils, 'get_sr_path')
    def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
                                  mock_set_info):
        session = mock.Mock()
        instance = {"uuid": "uuid"}
        session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
        session.call_xenapi.return_value = "vdi_ref"
        mock_get_sr_path.return_value = "sr_path"

        result = vm_utils._import_migrated_vhds(session, instance,
            'chain_label', 'disk_type', 'vdi_label')

        expected = {'uuid': "a", 'ref': "vdi_ref"}
        self.assertEqual(expected, result)
        mock_get_sr_path.assert_called_once_with(session)
        # The migration plugin moves the named VHD chain into the SR,
        # then the SR is rescanned and the new VDI looked up by uuid.
        session.call_plugin_serialized.assert_called_once_with('migration',
            'move_vhds_into_sr', instance_uuid='chain_label',
            sr_path='sr_path', uuid_stack=mock.ANY)
        mock_scan_sr.assert_called_once_with(session)
        session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
        mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
            'vdi_label', 'disk_type', instance)

    def test_get_vhd_parent_uuid_rec_provided(self):
        # When a vdi_rec is supplied and has no vhd-parent entry, the
        # helper returns None without any XenAPI round trip.
        session = mock.Mock()
        vdi_ref = 'vdi_ref'
        vdi_rec = {'sm_config': {}}
        self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
                                                        vdi_ref,
                                                        vdi_rec))
        self.assertFalse(session.call_xenapi.called)
+
+
class MigrateVHDTestCase(VMUtilsTestBase):
    """Tests for vm_utils.migrate_vhd's calls to the 'migration' plugin."""

    def _assert_transfer_called(self, session, label):
        # Every test here uses the same destination/path/sequence; only
        # the chain label varies.
        session.call_plugin_serialized.assert_called_once_with(
            'migration', 'transfer_vhd', instance_uuid=label, host="dest",
            vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)

    def test_migrate_vhd_root(self):
        fake_session = mock.Mock()
        fake_instance = {"uuid": "a"}

        vm_utils.migrate_vhd(fake_session, fake_instance, "vdi_uuid",
                             "dest", "sr_path", 2)

        self._assert_transfer_called(fake_session, "a")

    def test_migrate_vhd_ephemeral(self):
        fake_session = mock.Mock()
        fake_instance = {"uuid": "a"}

        vm_utils.migrate_vhd(fake_session, fake_instance, "vdi_uuid",
                             "dest", "sr_path", 2, 2)

        # The ephemeral disk number is appended to the chain label.
        self._assert_transfer_called(fake_session, "a_ephemeral_2")

    def test_migrate_vhd_converts_exceptions(self):
        fake_session = mock.Mock()
        fake_session.XenAPI.Failure = test.TestingException
        fake_session.call_plugin_serialized.side_effect = \
            test.TestingException()
        fake_instance = {"uuid": "a"}

        # A XenAPI failure is converted into a MigrationError.
        self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
                          fake_session, fake_instance, "vdi_uuid", "dest",
                          "sr_path", 2)
        self._assert_transfer_called(fake_session, "a")
+
+
class StripBaseMirrorTestCase(VMUtilsTestBase):
    """Tests for removing the 'base_mirror' sm_config key from VDIs."""

    def test_strip_base_mirror_from_vdi_works(self):
        session = mock.Mock()
        vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
        session.call_xenapi.assert_called_once_with(
            "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")

    def test_strip_base_mirror_from_vdi_hides_error(self):
        # XenAPI failures are swallowed: the call is attempted once and
        # the raised TestingException does not propagate.
        session = mock.Mock()
        session.XenAPI.Failure = test.TestingException
        session.call_xenapi.side_effect = test.TestingException()

        vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")

        session.call_xenapi.assert_called_once_with(
            "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")

    @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
    def test_strip_base_mirror_from_vdis(self, mock_strip):
        def call_xenapi(method, arg):
            if method == "VM.get_VBDs":
                return ['VBD_ref_1', 'VBD_ref_2']
            if method == "VBD.get_VDI":
                # Map 'VBD_ref_N' to 'VDI_ref_N'.
                return 'VDI' + arg[3:]
            return "Unexpected call_xenapi: %s.%s" % (method, arg)

        session = mock.Mock()
        session.call_xenapi.side_effect = call_xenapi

        vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")

        # Every VBD of the VM is resolved to its VDI...
        expected = [mock.call('VM.get_VBDs', "vm_ref"),
                    mock.call('VBD.get_VDI', "VBD_ref_1"),
                    mock.call('VBD.get_VDI', "VBD_ref_2")]
        self.assertEqual(expected, session.call_xenapi.call_args_list)

        # ...and each VDI gets the strip helper applied.
        expected = [mock.call(session, "VDI_ref_1"),
                    mock.call(session, "VDI_ref_2")]
        self.assertEqual(expected, mock_strip.call_args_list)
+
+
class DeviceIdTestCase(VMUtilsTestBase):
    """Tests for get_vm_device_id's hypervisor version gating."""

    def test_device_id_is_none_if_not_specified_in_meta_data(self):
        image_meta = {}
        session = mock.Mock()
        session.product_version = (6, 1, 0)
        self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))

    def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
        image_meta = {'xenapi_device_id': '0002'}
        session = mock.Mock()
        session.product_version = (6, 2, 0)
        self.assertEqual('0002',
                         vm_utils.get_vm_device_id(session, image_meta))
        session.product_version = (6, 3, 1)
        self.assertEqual('0002',
                         vm_utils.get_vm_device_id(session, image_meta))

    def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
        image_meta = {'xenapi_device_id': '0002'}
        session = mock.Mock()
        session.product_version = (6, 0)
        exc = self.assertRaises(exception.NovaException,
                                vm_utils.get_vm_device_id, session, image_meta)
        # NOTE: exc.message is Python 2 only (this file predates Python 3).
        self.assertEqual("Device id 0002 specified is not supported by "
                         "hypervisor version (6, 0)", exc.message)
        # NOTE(review): ('6a') is just the string '6a' - parentheses
        # without a trailing comma do not make a tuple. This exercises
        # the non-tuple version formatting in the error message.
        session.product_version = ('6a')
        exc = self.assertRaises(exception.NovaException,
                                vm_utils.get_vm_device_id, session, image_meta)
        self.assertEqual("Device id 0002 specified is not supported by "
                         "hypervisor version 6a", exc.message)
+
+
class CreateVmRecordTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_vm's VM.create record and list_vms."""

    @mock.patch.object(flavors, 'extract_flavor')
    def test_create_vm_record_linux(self, mock_extract_flavor):
        instance = {"uuid": "uuid123", "os_type": "linux"}
        self._test_create_vm_record(mock_extract_flavor, instance, False)

    @mock.patch.object(flavors, 'extract_flavor')
    def test_create_vm_record_windows(self, mock_extract_flavor):
        instance = {"uuid": "uuid123", "os_type": "windows"}
        self._test_create_vm_record(mock_extract_flavor, instance, True)

    def _test_create_vm_record(self, mock_extract_flavor, instance,
                               is_viridian):
        # is_viridian: expected value of the 'viridian' platform flag,
        # which is enabled only for Windows guests (see callers above).
        session = _get_fake_session()
        flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
        mock_extract_flavor.return_value = flavor

        vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
                           device_id="0002")

        is_viridian_str = str(is_viridian).lower()

        # The full record passed to XenAPI VM.create; memory fields are
        # the flavor's 1024MB expressed in bytes.
        expected_vm_rec = {
            'VCPUs_params': {'cap': '0', 'weight': '2'},
            'PV_args': '',
            'memory_static_min': '0',
            'ha_restart_priority': '',
            'HVM_boot_policy': 'BIOS order',
            'PV_bootloader': '',
            'tags': [],
            'VCPUs_max': '1',
            'memory_static_max': '1073741824',
            'actions_after_shutdown': 'destroy',
            'memory_dynamic_max': '1073741824',
            'user_version': '0',
            'xenstore_data': {'vm-data/allowvssprovider': 'false'},
            'blocked_operations': {},
            'is_a_template': False,
            'name_description': '',
            'memory_dynamic_min': '1073741824',
            'actions_after_crash': 'destroy',
            'memory_target': '1073741824',
            'PV_ramdisk': '',
            'PV_bootloader_args': '',
            'PCI_bus': '',
            'other_config': {'nova_uuid': 'uuid123'},
            'name_label': 'name',
            'actions_after_reboot': 'restart',
            'VCPUs_at_startup': '1',
            'HVM_boot_params': {'order': 'dc'},
            'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
                         'timeoffset': '0', 'viridian': is_viridian_str,
                         'acpi': 'true', 'device_id': '0002'},
            'PV_legacy_args': '',
            'PV_kernel': '',
            'affinity': '',
            'recommendations': '',
            'ha_always_run': False}

        session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)

    def test_list_vms(self):
        """list_vms only yields running VMs on this host (not Dom0)."""
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

        # One halted and one running VM, on top of the fakes built in.
        fake.create_vm("foo1", "Halted")
        vm_ref = fake.create_vm("foo2", "Running")

        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)

        result = list(vm_utils.list_vms(driver._session))

        # Will have 3 VMs - but one is Dom0 and one is not running on the host
        self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
        self.assertEqual(len(result), 1)

        result_keys = [key for (key, value) in result]

        self.assertIn(vm_ref, result_keys)
+
+
class ChildVHDsTestCase(test.NoDBTestCase):
    """Tests for vm_utils._child_vhds and _is_vdi_a_snapshot."""

    # Fixture: the parent itself ("my-uuid"), an unrelated VDI, a VDI
    # with a different parent, and a plain child plus a snapshot child
    # of the parent.
    all_vdis = [
        ("my-vdi-ref",
         {"uuid": "my-uuid", "sm_config": {},
          "is_a_snapshot": False, "other_config": {}}),
        ("non-parent",
         {"uuid": "uuid-1", "sm_config": {},
          "is_a_snapshot": False, "other_config": {}}),
        ("diff-parent",
         {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
          "is_a_snapshot": False, "other_config": {}}),
        ("child",
         {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
          "is_a_snapshot": False, "other_config": {}}),
        ("child-snap",
         {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
          "is_a_snapshot": True, "other_config": {}}),
    ]

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_child_vhds_defaults(self, mock_get_all):
        # By default both plain children and snapshot children match.
        mock_get_all.return_value = self.all_vdis

        result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])

        self.assertEqual(['uuid-child', 'uuid-child-snap'], result)

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_child_vhds_only_snapshots(self, mock_get_all):
        # old_snapshots_only filters the result to snapshot children.
        mock_get_all.return_value = self.all_vdis

        result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
                                      old_snapshots_only=True)

        self.assertEqual(['uuid-child-snap'], result)

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_child_vhds_chain(self, mock_get_all):
        # Multiple parent uuids may be passed; members of the chain
        # itself are not returned as children.
        mock_get_all.return_value = self.all_vdis

        result = vm_utils._child_vhds("session", "sr_ref",
                                      ["my-uuid", "other-uuid"],
                                      old_snapshots_only=True)

        self.assertEqual(['uuid-child-snap'], result)

    def test_is_vdi_a_snapshot_works(self):
        vdi_rec = {"is_a_snapshot": True,
                   "other_config": {}}

        self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))

    def test_is_vdi_a_snapshot_base_images_false(self):
        # A snapshot that is a glance base image (image-id present in
        # other_config) is not treated as a snapshot.
        vdi_rec = {"is_a_snapshot": True,
                   "other_config": {"image-id": "fake"}}

        self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))

    def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
        vdi_rec = {"is_a_snapshot": False,
                   "other_config": {}}

        self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
    """Tests for garbage-collecting old snapshots in a VDI chain."""

    @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
    @mock.patch.object(vm_utils, '_walk_vdi_chain')
    @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
    def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
        fake_instance = {"uuid": "fake"}
        mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
        mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]

        vm_utils.remove_old_snapshots("session", fake_instance, "vm_ref")

        mock_get.assert_called_once_with("session", "vm_ref")
        mock_walk.assert_called_once_with("session", "vdi")
        # The walked chain uuids are handed to the delete helper.
        mock_delete.assert_called_once_with("session", fake_instance,
                                            ["uuid1", "uuid2"], "sr_ref")

    @mock.patch.object(vm_utils, '_child_vhds')
    def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
        # A single-entry chain has no parent: nothing to inspect.
        vm_utils._delete_snapshots_in_vdi_chain("session", {"uuid": "fake"},
                                                ["uuid"], "sr")

        self.assertFalse(mock_child.called)

    @mock.patch.object(vm_utils, '_child_vhds')
    def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
        mock_child.return_value = []

        vm_utils._delete_snapshots_in_vdi_chain("session", {"uuid": "fake"},
                                                ["uuid1", "uuid2"], "sr")

        # Only old snapshot children of the chain tail are considered.
        mock_child.assert_called_once_with("session", "sr", ["uuid2"],
                                           old_snapshots_only=True)

    @mock.patch.object(vm_utils, '_scan_sr')
    @mock.patch.object(vm_utils, 'safe_destroy_vdis')
    @mock.patch.object(vm_utils, '_child_vhds')
    def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
                                                         mock_destroy,
                                                         mock_scan):
        fake_instance = {"uuid": "fake"}
        fake_session = mock.Mock()
        mock_child.return_value = ["suuid1", "suuid2"]
        fake_session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]

        vm_utils._delete_snapshots_in_vdi_chain(fake_session, fake_instance,
                                                ["uuid1", "uuid2"], "sr")

        mock_child.assert_called_once_with(fake_session, "sr", ["uuid2"],
                                           old_snapshots_only=True)
        # Each old snapshot uuid is resolved to a ref, destroyed, and
        # the SR rescanned afterwards.
        fake_session.VDI.get_by_uuid.assert_has_calls(
            [mock.call("suuid1"), mock.call("suuid2")])
        mock_destroy.assert_called_once_with(fake_session, ["ref1", "ref2"])
        mock_scan.assert_called_once_with(fake_session, "sr")
+
+
class ResizeFunctionTestCase(test.NoDBTestCase):
    """Check which XenAPI resize call is chosen per product/version."""

    def _call_get_resize_func_name(self, brand, version):
        fake_session = mock.Mock()
        fake_session.product_brand = brand
        fake_session.product_version = version
        return vm_utils._get_resize_func_name(fake_session)

    def _test_is_resize(self, brand, version):
        self.assertEqual(
            "VDI.resize", self._call_get_resize_func_name(brand, version))

    def _test_is_resize_online(self, brand, version):
        self.assertEqual(
            "VDI.resize_online",
            self._call_get_resize_func_name(brand, version))

    def test_xenserver_5_5(self):
        self._test_is_resize_online("XenServer", (5, 5, 0))

    def test_xenserver_6_0(self):
        self._test_is_resize("XenServer", (6, 0, 0))

    def test_xcp_1_1(self):
        self._test_is_resize_online("XCP", (1, 1, 0))

    def test_xcp_1_2(self):
        self._test_is_resize("XCP", (1, 2, 0))

    def test_xcp_2_0(self):
        self._test_is_resize("XCP", (2, 0, 0))

    def test_random_brand(self):
        self._test_is_resize("asfd", (1, 1, 0))

    def test_default(self):
        self._test_is_resize(None, None)

    def test_empty(self):
        self._test_is_resize("", "")

    def test_bad_version(self):
        # A non-tuple version string falls back to plain resize.
        self._test_is_resize("XenServer", "asdf")
+
+
class VMInfoTests(VMUtilsTestBase):
    """Tests for the power-state and instance-info helpers."""

    def setUp(self):
        super(VMInfoTests, self).setUp()
        self.session = mock.Mock()

    def test_get_power_state_valid(self):
        # Save on test setup calls by having these simple tests in one method
        self.session.call_xenapi.return_value = "Running"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.RUNNING)

        self.session.call_xenapi.return_value = "Halted"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.SHUTDOWN)

        self.session.call_xenapi.return_value = "Paused"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.PAUSED)

        self.session.call_xenapi.return_value = "Suspended"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.SUSPENDED)

        self.session.call_xenapi.return_value = "Crashed"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.CRASHED)

    def test_get_power_state_invalid(self):
        # An unmapped XenAPI power state surfaces as a KeyError.
        self.session.call_xenapi.return_value = "Invalid"
        self.assertRaises(KeyError,
                          vm_utils.get_power_state, self.session, "ref")

    # Canned answers for the VM.get_<field> calls made by compile_info.
    _XAPI_record = {'power_state': 'Running',
                    'memory_static_max': str(10 << 10),
                    'memory_dynamic_max': str(9 << 10),
                    'VCPUs_max': '5'}

    def test_compile_info(self):

        def call_xenapi(method, *args):
            # Answer only VM.get_<field> calls for the 'dummy' ref;
            # method[7:] strips the 'VM.get_' prefix.
            if method.startswith('VM.get_') and args[0] == 'dummy':
                return self._XAPI_record[method[7:]]

        self.session.call_xenapi.side_effect = call_xenapi

        # 10L/9L are Python 2 long literals (this file predates Python 3).
        # The expected values are the record's 10<<10 / 9<<10 scaled down
        # by 1024 - presumably compile_info converts units; confirm there.
        expected = {'state': power_state.RUNNING,
                    'max_mem': 10L,
                    'mem': 9L,
                    'num_cpu': '5',
                    'cpu_time': 0}

        self.assertEqual(vm_utils.compile_info(self.session, "dummy"),
                         expected)
diff --git a/nova/tests/unit/virt/xenapi/test_vmops.py b/nova/tests/unit/virt/xenapi/test_vmops.py
new file mode 100644
index 0000000000..8140f997d2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vmops.py
@@ -0,0 +1,1124 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import exception
+from nova import objects
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent as xenapi_agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Base test case wiring a VMOps instance to the fake XenAPI session."""

    def setUp(self):
        super(VMOpsTestBase, self).setUp()
        self._setup_mock_vmops()
        # Refs of VMs created via create_vm(); destroyed in tearDown.
        self.vms = []

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # product_brand/product_version are unused here but keep the
        # signature compatible with subclasses overriding this hook.
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        self._session = xenapi_session.XenAPISession('test_url', 'root',
                                                     'test_pass')
        self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())

    def create_vm(self, name, state="Running"):
        """Create a fake VM and return its (record, ref) pair."""
        vm_ref = xenapi_fake.create_vm(name, state)
        self.vms.append(vm_ref)
        vm = xenapi_fake.get_record("VM", vm_ref)
        return vm, vm_ref

    def tearDown(self):
        super(VMOpsTestBase, self).tearDown()
        for vm in self.vms:
            xenapi_fake.destroy_vm(vm)
+
+
class VMOpsTestCase(VMOpsTestBase):
    """Tests that drive VMOps against a bare mock session (no fake XenAPI)."""

    def setUp(self):
        super(VMOpsTestCase, self).setUp()
        self._setup_mock_vmops()

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Override the base class hook: use a minimal stand-in session
        # instead of a stubbed XenAPISession.
        self._session = self._get_mock_session(product_brand, product_version)
        self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())

    def _get_mock_session(self, product_brand, product_version):
        # A plain object suffices: only the two product attributes are read.
        class Mock(object):
            pass

        mock_session = Mock()
        mock_session.product_brand = product_brand
        mock_session.product_version = product_version
        return mock_session

    def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                                  vm_shutdown=True):
        """Script (with mox) the lookups/cleanup finish_revert_migration
        performs depending on how far the crashed migration got.

        :param backup_made: the 'foo-orig' backup rename had happened
        :param new_made: the new 'foo' VM had already been created
        :param vm_shutdown: the VM is found shut down, so it must be started
        """
        instance = {'name': 'foo',
                    'task_state': task_states.RESIZE_MIGRATING}
        context = 'fake_context'

        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self._vmops, '_destroy')
        self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
        self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
        self.mox.StubOutWithMock(self._vmops, '_start')
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')

        # Python 2 'and/or' conditionals: return a ref only when the
        # corresponding VM is supposed to exist in this scenario.
        vm_utils.lookup(self._session, 'foo-orig').AndReturn(
            backup_made and 'foo' or None)
        vm_utils.lookup(self._session, 'foo').AndReturn(
            (not backup_made or new_made) and 'foo' or None)
        if backup_made:
            if new_made:
                # The half-created new VM must be destroyed first.
                self._vmops._destroy(instance, 'foo')
            vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
            self._vmops._attach_mapped_block_devices(instance, [])

        vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
        if vm_shutdown:
            self._vmops._start(instance, 'foo')

        self.mox.ReplayAll()

        self._vmops.finish_revert_migration(context, instance, [])

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(True, True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(True, False)

    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(False, False)

    def test_xsm_sr_check_relaxed_cached(self):
        # The plugin is consulted only once; the answer is then cached.
        self.make_plugin_call_count = 0

        def fake_make_plugin_call(plugin, method, **args):
            self.make_plugin_call_count = self.make_plugin_call_count + 1
            return "true"

        self.stubs.Set(self._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())

        self.assertEqual(self.make_plugin_call_count, 1)

    def test_get_vm_opaque_ref_raises_instance_not_found(self):
        instance = {"name": "dummy"}
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        # lookup returning None means no VM with that name exists.
        vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceNotFound,
                          self._vmops._get_vm_opaque_ref, instance)
+
+
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
    """xenstore 'vm-data/auto-disk-config' mirrors the instance flag."""

    def test_inject_auto_disk_config_when_present(self):
        vm, vm_ref = self.create_vm("dummy")
        fake_instance = {"name": "dummy", "uuid": "1234",
                         "auto_disk_config": True}

        self.vmops._inject_auto_disk_config(fake_instance, vm_ref)

        self.assertEqual('True',
                         vm['xenstore_data']['vm-data/auto-disk-config'])

    def test_inject_auto_disk_config_none_as_false(self):
        vm, vm_ref = self.create_vm("dummy")
        fake_instance = {"name": "dummy", "uuid": "1234",
                         "auto_disk_config": None}

        self.vmops._inject_auto_disk_config(fake_instance, vm_ref)

        # None is written out as 'False', not omitted.
        self.assertEqual('False',
                         vm['xenstore_data']['vm-data/auto-disk-config'])
+
+
class GetConsoleOutputTestCase(VMOpsTestBase):
    """Tests for console output retrieval and dom-id lookup."""

    def test_get_console_output_works(self):
        self.mox.StubOutWithMock(self.vmops, '_get_dom_id')

        instance = {"name": "dummy"}
        self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
        self.mox.ReplayAll()

        self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))

    def test_get_console_output_throws_nova_exception(self):
        self.mox.StubOutWithMock(self.vmops, '_get_dom_id')

        instance = {"name": "dummy"}
        # dom_id=0 used to trigger exception in fake XenAPI
        self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.vmops.get_console_output, instance)

    def test_get_dom_id_works(self):
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))

    def test_get_dom_id_works_with_rescue_vm(self):
        # With check_rescue=True the '<name>-rescue' VM is looked up.
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy-rescue")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(instance, check_rescue=True))

    def test_get_dom_id_raises_not_found(self):
        instance = {"name": "dummy"}
        self.create_vm("not-dummy")
        self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)

    def test_get_dom_id_works_with_vmref(self):
        # A vm_ref may be passed directly, skipping the name lookup.
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(vm_ref=vm_ref))
+
+
+class SpawnTestCase(VMOpsTestBase):
    def _stub_out_common(self):
        """Stub every collaborator spawn() touches so _test_spawn can
        record an exact mox script of the expected call sequence."""
        self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
        self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
        self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
        self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
        self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
        self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
        self.mox.StubOutWithMock(self.vmops._volumeops,
                                 'safe_cleanup_from_vdis')
        self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
        self.mox.StubOutWithMock(vm_utils,
                                 'create_kernel_and_ramdisk')
        self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
        self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
        self.mox.StubOutWithMock(self.vmops, '_destroy')
        self.mox.StubOutWithMock(self.vmops, '_attach_disks')
        self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
        self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
        self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
        self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
        self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
        self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
        self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
        self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
        self.mox.StubOutWithMock(self.vmops, '_create_vifs')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'prepare_instance_filter')
        self.mox.StubOutWithMock(self.vmops, '_start')
        self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
        self.mox.StubOutWithMock(self.vmops,
                                 '_configure_new_instance_with_agent')
        self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'apply_instance_filter')
+
    def _test_spawn(self, name_label_param=None, block_device_info_param=None,
                    rescue=False, include_root_vdi=True, throw_exception=None,
                    attach_pci_dev=False):
        """Record the full mox script for VMOps.spawn and then run it.

        The knobs select the variations under test: a custom name label,
        boot-from-volume (no root VDI), rescue mode, PCI passthrough,
        and a failure injected at the final progress update (which must
        trigger cleanup of the VM, kernel/ramdisk and VDIs).
        """
        self._stub_out_common()

        instance = {"name": "dummy", "uuid": "fake_uuid"}
        name_label = name_label_param
        if name_label is None:
            name_label = "dummy"
        image_meta = {"id": "image_id"}
        # NOTE: shadows the imported nova.context module within this method.
        context = "context"
        session = self.vmops._session
        injected_files = "fake_files"
        admin_password = "password"
        network_info = "net_info"
        # Progress is reported in equal steps; rescue adds one extra
        # step for attaching the original disks.
        steps = 10
        if rescue:
            steps += 1

        block_device_info = block_device_info_param
        if block_device_info and not block_device_info['root_device_name']:
            # spawn fills in a default root device name; copy first so
            # the caller's dict is not mutated.
            block_device_info = dict(block_device_info_param)
            block_device_info['root_device_name'] = \
                self.vmops.default_root_dev

        # Step 1: determine the disk image type.
        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
        step = 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 2: fetch/create the VDIs and resize them up.
        vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
        if include_root_vdi:
            vdis["root"] = {"ref": "fake_ref"}
        self.vmops._get_vdis_for_instance(context, instance,
                name_label, "image_id", di_type,
                block_device_info).AndReturn(vdis)
        self.vmops._resize_up_vdis(instance, vdis)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 3: kernel and ramdisk.
        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 4: sanity checks and the VM record itself.
        vm_ref = "fake_vm_ref"
        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta).AndReturn(vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 5: attach disks and, optionally, PCI devices.
        self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
                network_info, rescue, admin_password, injected_files)
        if attach_pci_dev:
            fake_dev = {
                'created_at': None,
                'updated_at': None,
                'deleted_at': None,
                'deleted': None,
                'id': 1,
                'compute_node_id': 1,
                'address': '00:00.0',
                'vendor_id': '1234',
                'product_id': 'abcd',
                'dev_type': 'type-PCI',
                'status': 'available',
                'dev_id': 'devid',
                'label': 'label',
                'instance_uuid': None,
                'extra_info': '{}',
            }
            pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
            vm_utils.set_other_config_pci(self.vmops._session,
                                          vm_ref,
                                          "0/0000:00:00.0")
        else:
            pci_manager.get_instance_pci_devs(instance).AndReturn([])
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 6: metadata, disk-config flag, hostname, file and network
        # info injection.
        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._inject_hostname(instance, vm_ref, rescue)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 7: VIFs and firewall setup. setup_basic_filtering raising
        # NotImplementedError must be tolerated by spawn.
        self.vmops._create_vifs(instance, vm_ref, network_info)
        self.vmops.firewall_driver.setup_basic_filtering(instance,
                network_info).AndRaise(NotImplementedError)
        self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                           network_info)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Extra step (rescue only): attach the original disks.
        if rescue:
            self.vmops._attach_orig_disks(instance, vm_ref)
            step += 1
            self.vmops._update_instance_progress(context, instance, step,
                                                 steps)
        # Step 8: boot the VM and wait for it.
        self.vmops._start(instance, vm_ref)
        self.vmops._wait_for_instance_to_start(instance, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 9: agent configuration and hostname cleanup.
        self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                injected_files, admin_password)
        self.vmops._remove_hostname(instance, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 10: final firewall application and last progress update.
        self.vmops.firewall_driver.apply_instance_filter(instance,
                                                         network_info)
        step += 1
        last_call = self.vmops._update_instance_progress(context, instance,
                                                         step, steps)
        if throw_exception:
            # Failure at the very end: spawn must destroy the VM,
            # the kernel/ramdisk, the root VDI, and clean up volumes.
            last_call.AndRaise(throw_exception)
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
            self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])

        self.mox.ReplayAll()
        self.vmops.spawn(context, instance, image_meta, injected_files,
                         admin_password, network_info,
                         block_device_info_param, name_label_param, rescue)
+
    def test_spawn(self):
        # Happy path: all _test_spawn defaults (root VDI present, no rescue,
        # no PCI devices, no injected exception).
        self._test_spawn()
+
    def test_spawn_with_alternate_options(self):
        # Exercise the non-default arms: no root VDI, rescue mode, an
        # explicit name label, and a caller-supplied block_device_info.
        self._test_spawn(include_root_vdi=False, rescue=True,
                         name_label_param="bob",
                         block_device_info_param={"root_device_name": ""})
+
    def test_spawn_with_pci_available_on_the_host(self):
        # When a PCI device is allocated to the instance, spawn must record
        # it via vm_utils.set_other_config_pci (see _test_spawn's
        # attach_pci_dev arm).
        self._test_spawn(attach_pci_dev=True)
+
    def test_spawn_performs_rollback_and_throws_exception(self):
        # A failure on the final progress update must roll back (destroy the
        # VM, kernel/ramdisk and VDIs; clean up volume VDIs) and re-raise.
        self.assertRaises(test.TestingException, self._test_spawn,
                          throw_exception=test.TestingException())
+
    def _test_finish_migration(self, power_on=True, resize_instance=True,
                               throw_exception=None):
        """Record the expected call sequence for finish_migration, then run it.

        mox record/replay: every call made on a stub before ReplayAll() is
        an ordered expectation that VMOps.finish_migration must reproduce.

        :param power_on: expect the VM to be started and waited on.
        :param resize_instance: expect the imported VDIs to be resized up.
        :param throw_exception: if set, raised from the final progress
            update; the rollback expectations are then recorded as well.
        """
        self._stub_out_common()
        self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
        self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")

        context = "context"
        migration = {}
        name_label = "dummy"
        instance = {"name": name_label, "uuid": "fake_uuid"}
        disk_info = "disk_info"
        network_info = "net_info"
        image_meta = {"id": "image_id"}
        block_device_info = "bdi"
        session = self.vmops._session

        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)

        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)

        # finish_migration gets its disks from the migrated-disk import,
        # not from glance.
        root_vdi = {"ref": "fake_ref"}
        ephemeral_vdi = {"ref": "fake_ref_e"}
        vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
        vm_utils.import_all_migrated_disks(self.vmops._session,
                                           instance).AndReturn(vdis)

        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))

        vm_ref = "fake_vm_ref"
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta).AndReturn(vm_ref)

        if resize_instance:
            self.vmops._resize_up_vdis(instance, vdis)
        self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
                                 network_info, False, None, None)
        self.vmops._attach_mapped_block_devices(instance, block_device_info)
        pci_manager.get_instance_pci_devs(instance).AndReturn([])

        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)

        self.vmops._create_vifs(instance, vm_ref, network_info)
        # NotImplementedError from setup_basic_filtering must be tolerated;
        # the flow continues with prepare_instance_filter.
        self.vmops.firewall_driver.setup_basic_filtering(instance,
                network_info).AndRaise(NotImplementedError)
        self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                           network_info)

        if power_on:
            self.vmops._start(instance, vm_ref)
            self.vmops._wait_for_instance_to_start(instance, vm_ref)

        self.vmops.firewall_driver.apply_instance_filter(instance,
                                                         network_info)

        last_call = self.vmops._update_instance_progress(context, instance,
                                                         step=5, total_steps=5)
        if throw_exception:
            # Rollback path: destroy the VM, the kernel/ramdisk pair and
            # all imported VDIs, then let the exception propagate.
            last_call.AndRaise(throw_exception)
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session,
                                       ["fake_ref_e", "fake_ref"])

        self.mox.ReplayAll()
        self.vmops.finish_migration(context, migration, instance, disk_info,
                                    network_info, image_meta, resize_instance,
                                    block_device_info, power_on)
+
    def test_finish_migration(self):
        # Defaults: power on and resize the imported disks.
        self._test_finish_migration()
+
    def test_finish_migration_no_power_on(self):
        # Neither _start/_wait nor _resize_up_vdis should be expected.
        self._test_finish_migration(power_on=False, resize_instance=False)
+
    def test_finish_migrate_performs_rollback_on_error(self):
        # A failing final progress update triggers the rollback expectations
        # recorded in _test_finish_migration and re-raises.
        self.assertRaises(test.TestingException, self._test_finish_migration,
                          power_on=False, resize_instance=False,
                          throw_exception=test.TestingException())
+
    def test_remove_hostname(self):
        # _remove_hostname must delete the vm-data/hostname key from the
        # VM's xenstore data via the XenAPI session.
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        self.mox.StubOutWithMock(self._session, 'call_xenapi')
        self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
                                  "vm-data/hostname")

        self.mox.ReplayAll()
        self.vmops._remove_hostname(instance, vm_ref)
        self.mox.VerifyAll()
+
    def test_reset_network(self):
        # reset_network with an enabled agent must call the agent's
        # resetnetwork and re-inject then remove the hostname.
        class mock_agent(object):
            # Minimal agent stand-in that records whether resetnetwork ran.
            def __init__(self):
                self.called = False

            def resetnetwork(self):
                self.called = True

        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        agent = mock_agent()

        self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
        self.mox.StubOutWithMock(self.vmops, '_get_agent')
        self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
        self.mox.StubOutWithMock(self.vmops, '_remove_hostname')

        self.vmops.agent_enabled(instance).AndReturn(True)
        self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
        self.vmops._inject_hostname(instance, vm_ref, False)
        self.vmops._remove_hostname(instance, vm_ref)
        self.mox.ReplayAll()
        self.vmops.reset_network(instance)
        self.assertTrue(agent.called)
        self.mox.VerifyAll()
+
    def test_inject_hostname(self):
        # Non-rescue: the hostname is written to xenstore unchanged.
        instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
        vm_ref = "vm_ref"

        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')

        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=False)
+
    def test_inject_hostname_with_rescue_prefix(self):
        # Rescue mode: the hostname is prefixed with 'RESCUE-'.
        instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
        vm_ref = "vm_ref"

        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                          'RESCUE-dummy')

        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
    def test_inject_hostname_with_windows_name_truncation(self):
        # Windows guests: the expected value is 15 chars total
        # ('RESCUE-' + 8) -- presumably the Windows/NetBIOS hostname
        # length limit; confirm against vmops._inject_hostname.
        instance = {"hostname": "dummydummydummydummydummy",
                    "os_type": "windows", "uuid": "uuid"}
        vm_ref = "vm_ref"

        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                          'RESCUE-dummydum')

        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
    def test_wait_for_instance_to_start(self):
        # The wait loop polls power state, sleeping 0.5s between polls,
        # until the VM reports RUNNING.
        instance = {"uuid": "uuid"}
        vm_ref = "vm_ref"

        self.mox.StubOutWithMock(vm_utils, 'get_power_state')
        self.mox.StubOutWithMock(greenthread, 'sleep')
        vm_utils.get_power_state(self._session, vm_ref).AndReturn(
            power_state.SHUTDOWN)
        greenthread.sleep(0.5)
        vm_utils.get_power_state(self._session, vm_ref).AndReturn(
            power_state.RUNNING)

        self.mox.ReplayAll()
        self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
    def test_attach_orig_disks(self):
        # The original VM's root VDI (volumes excluded) is attached to the
        # rescue VM on the DEVICE_RESCUE slot, non-bootable.
        instance = {"name": "dummy"}
        vm_ref = "vm_ref"
        vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}

        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
        self.mox.StubOutWithMock(vm_utils, 'create_vbd')

        vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
        self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
            vbd_refs)
        vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
                            vmops.DEVICE_RESCUE, bootable=False)

        self.mox.ReplayAll()
        self.vmops._attach_orig_disks(instance, vm_ref)
+
    def test_agent_update_setup(self):
        # agent updates need to occur after networking is configured:
        # resetnetwork is expected before update_if_needed.
        instance = {'name': 'betelgeuse',
                    'uuid': '1-2-3-4-5-6'}
        vm_ref = 'vm_ref'
        agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
                self.vmops._virtapi, instance, vm_ref)

        self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
        self.mox.StubOutWithMock(self.vmops, '_get_agent')
        self.mox.StubOutWithMock(agent, 'get_version')
        self.mox.StubOutWithMock(agent, 'resetnetwork')
        self.mox.StubOutWithMock(agent, 'update_if_needed')

        xenapi_agent.should_use_agent(instance).AndReturn(True)
        self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
        agent.get_version().AndReturn('1.2.3')
        agent.resetnetwork()
        agent.update_if_needed('1.2.3')

        self.mox.ReplayAll()
        self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                None, None)
+
+
class DestroyTestCase(VMOpsTestBase):
    """Tests for VMOps.destroy when the VM itself no longer exists."""

    def setUp(self):
        super(DestroyTestCase, self).setUp()
        self.context = context.RequestContext(user_id=None, project_id=None)
        self.instance = fake_instance.fake_instance_obj(self.context)

    # Note: mock.patch decorators apply bottom-up, so the mock arguments
    # below are ordered forget_sr, find_sr_by_uuid, hard_shutdown_vm, lookup.
    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
                          lookup):
        # No VM and no block devices: destroy is a no-op.
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': []})
        self.assertEqual(0, find_sr_by_uuid.call_count)
        self.assertEqual(0, forget_sr.call_count)
        self.assertEqual(0, hard_shutdown_vm.call_count)

    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
                                         hard_shutdown_vm, lookup):
        # Volume attached but its SR ('FA15E-D15C-' + volume id) is gone:
        # nothing to forget.
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': [{'connection_info':
                               {'data': {'volume_id': 'fake-uuid'}}}]})
        find_sr_by_uuid.assert_called_once_with(self.vmops._session,
                'FA15E-D15C-fake-uuid')
        self.assertEqual(0, forget_sr.call_count)
        self.assertEqual(0, hard_shutdown_vm.call_count)

    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_orphaned_volume(self, forget_sr, find_sr_by_uuid,
                                   hard_shutdown_vm, lookup):
        # Orphaned volume SR still present: it must be forgotten.
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': [{'connection_info':
                               {'data': {'volume_id': 'fake-uuid'}}}]})
        find_sr_by_uuid.assert_called_once_with(self.vmops._session,
                'FA15E-D15C-fake-uuid')
        forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
        self.assertEqual(0, hard_shutdown_vm.call_count)
+
+
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
    """migrate_disk_and_power_off must dispatch on the flavor's disk sizes.

    The class decorators apply bottom-up: migrate_up and migrate_down are
    the first two mock parameters; the remaining mocks are unused (*mocks).
    """

    def test_migrate_disk_and_power_off_works_down(self,
            migrate_up, migrate_down, *mocks):
        # Shrinking root disk -> the resize-down path.
        instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
        flavor = {"root_gb": 1, "ephemeral_gb": 0}

        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)

        self.assertFalse(migrate_up.called)
        self.assertTrue(migrate_down.called)

    def test_migrate_disk_and_power_off_works_up(self,
            migrate_up, migrate_down, *mocks):
        # Growing disks -> the resize-up path.
        instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
        flavor = {"root_gb": 2, "ephemeral_gb": 2}

        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)

        self.assertFalse(migrate_down.called)
        self.assertTrue(migrate_up.called)

    def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
            migrate_up, migrate_down, *mocks):
        # Shrinking ephemeral disks is unsupported and must raise.
        instance = {"ephemeral_gb": 2}
        flavor = {"ephemeral_gb": 1}

        self.assertRaises(exception.ResizeError,
                          self.vmops.migrate_disk_and_power_off,
                          None, instance, None, flavor, None)
+
+
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
    """Tests for VMOps._migrate_disk_resizing_up (live VHD chain copy)."""

    def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
                                     userdevice, post_snapshot_callback):
        # Stand-in for vm_utils._snapshot_attached_here_impl (a generator
        # used as a context manager). Yields the VHD uuid chain for the
        # requested userdevice: a 3-deep chain for the root disk ('0'),
        # a 2-deep '<dev>-leaf'/'<dev>-parent' chain for other devices.
        self.assertIsInstance(instance, dict)
        if userdevice == '0':
            self.assertEqual("vm_ref", vm_ref)
            self.assertEqual("fake-snapshot", label)
            yield ["leaf", "parent", "grandp"]
        else:
            leaf = userdevice + "-leaf"
            parent = userdevice + "-parent"
            yield [leaf, parent]

    def test_migrate_disk_resizing_up_works_no_ephemeral(self,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd):
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"

        # No ephemeral VDIs on the VM.
        mock_get_all_vdi_uuids.return_value = None

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)

        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)

        # Parents are migrated while the VM is live; the leaf is copied
        # last, after shutdown.
        m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
                                    dest, sr_path, 1),
                          mock.call(self.vmops._session, instance, "grandp",
                                    dest, sr_path, 2),
                          mock.call(self.vmops._session, instance, "leaf",
                                    dest, sr_path, 0)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)

        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)

    def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd):
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"

        # Two ephemeral VDIs (userdevices 4 and 5).
        mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)

        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)

        # Root and ephemeral parents first (live), then all leaves after
        # shutdown; ephemeral calls carry an extra disk-number argument.
        m_vhd_expected = [mock.call(self.vmops._session, instance,
                                    "parent", dest, sr_path, 1),
                          mock.call(self.vmops._session, instance,
                                    "grandp", dest, sr_path, 2),
                          mock.call(self.vmops._session, instance,
                                    "4-parent", dest, sr_path, 1, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-parent", dest, sr_path, 1, 2),
                          mock.call(self.vmops._session, instance,
                                    "leaf", dest, sr_path, 0),
                          mock.call(self.vmops._session, instance,
                                    "4-leaf", dest, sr_path, 0, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-leaf", dest, sr_path, 0, 2)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)

        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)

    @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
    def test_migrate_disk_resizing_up_rollback(self,
            mock_restore,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd):
        context = "ctxt"
        instance = {"name": "fake", "uuid": "fake"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"

        # Both the migration and the restore fail; the original error must
        # still surface, wrapped as InstanceFaultRollback.
        mock_migrate_vhd.side_effect = test.TestingException
        mock_restore.side_effect = test.TestingException

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.assertRaises(exception.InstanceFaultRollback,
                              self.vmops._migrate_disk_resizing_up,
                              context, instance, dest, vm_ref, sr_path)

        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_restore.assert_called_once_with(instance)
        mock_migrate_vhd.assert_called_once_with(self.vmops._session,
                instance, "parent", dest, sr_path, 1)
+
+
class CreateVMRecordTestCase(VMOpsTestBase):
    """Tests for VMOps._create_vm_record."""

    @mock.patch.object(vm_utils, 'determine_vm_mode')
    @mock.patch.object(vm_utils, 'get_vm_device_id')
    @mock.patch.object(vm_utils, 'create_vm')
    def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
            mock_get_vm_device_id, mock_determine_vm_mode):
        # The image's xenapi_device_id property must be resolved via
        # get_vm_device_id and forwarded to vm_utils.create_vm.
        context = "context"
        instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
        name_label = "dummy"
        disk_image_type = "vhd"
        kernel_file = "kernel"
        ramdisk_file = "ram"
        device_id = "0002"
        image_properties = {"xenapi_device_id": device_id}
        image_meta = {"properties": image_properties}
        session = "session"
        self.vmops._session = session
        mock_get_vm_device_id.return_value = device_id
        mock_determine_vm_mode.return_value = "vm_mode"

        self.vmops._create_vm_record(context, instance, name_label,
                disk_image_type, kernel_file, ramdisk_file, image_meta)

        mock_get_vm_device_id.assert_called_with(session, image_properties)
        mock_create_vm.assert_called_with(session, instance, name_label,
                kernel_file, ramdisk_file, False, device_id)
+
+
class BootableTestCase(VMOpsTestBase):
    """Bootlock handling: blocking/unblocking the 'start' operation."""

    def setUp(self):
        super(BootableTestCase, self).setUp()

        self.instance = {"name": "test", "uuid": "fake"}
        vm_rec, self.vm_ref = self.create_vm('test')

        # sanity check bootlock is initially disabled:
        self.assertEqual({}, vm_rec['blocked_operations'])

    def _get_blocked(self):
        # Return the VM's current blocked_operations map from the fake
        # XenAPI session.
        vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
        return vm_rec['blocked_operations']

    def test_acquire_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        blocked = self._get_blocked()
        self.assertIn('start', blocked)

    def test_release_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        self.vmops._release_bootlock(self.vm_ref)
        blocked = self._get_blocked()
        self.assertNotIn('start', blocked)

    def test_set_bootable(self):
        # set_bootable(True) must leave 'start' unblocked.
        self.vmops.set_bootable(self.instance, True)
        blocked = self._get_blocked()
        self.assertNotIn('start', blocked)

    def test_set_not_bootable(self):
        # set_bootable(False) must block 'start'.
        self.vmops.set_bootable(self.instance, False)
        blocked = self._get_blocked()
        self.assertIn('start', blocked)
+
+
@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
class ResizeVdisTestCase(VMOpsTestBase):
    """Tests for VMOps._resize_up_vdis (root and ephemeral VDI growth)."""

    def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
        # NOTE(review): despite the name, osvol=False means the root is NOT
        # a cinder volume, so it IS resized -- hence assertTrue below.
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertTrue(mock_resize.called)

    def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
        # Volume-backed root (osvol=True) must never be resized.
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {'osvol': True}}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertFalse(mock_resize.called)

    def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
        # No 'ref' key in the root vdi dict: nothing to resize.
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {}}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
                                                      mock_resize):
        # Ephemerals are resized even when the root is a volume; no new
        # ephemerals are generated since both already exist.
        mock_sizes.return_value = [2000, 1000]
        instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
                'ephemerals': ephemerals}
        with mock.patch.object(vm_utils, 'generate_single_ephemeral',
                               autospec=True) as g:
            self.vmops._resize_up_vdis(instance, vdis)
            self.assertEqual([mock.call(self.vmops._session, instance, 4,
                                        2000),
                              mock.call(self.vmops._session, instance, 5,
                                        1000)],
                             mock_resize.call_args_list)
            self.assertFalse(g.called)

    def test_resize_up_vdis_root(self, mock_resize):
        instance = {"root_gb": 20, "ephemeral_gb": 0}
        self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            "vdi_ref", 20)

    def test_resize_up_vdis_zero_disks(self, mock_resize):
        # root_gb == 0: no resize attempted.
        instance = {"root_gb": 0, "ephemeral_gb": 0}
        self.vmops._resize_up_vdis(instance, {"root": {}})
        self.assertFalse(mock_resize.called)

    def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
        # Empty vdis dict (as on initial spawn): nothing to resize.
        instance = {"root_gb": 0, "ephemeral_gb": 3000}
        vdis = {}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
        # Each existing ephemeral is resized to its computed size.
        mock_sizes.return_value = [2000, 1000]
        instance = {"root_gb": 0, "ephemeral_gb": 3000}
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {"ephemerals": ephemerals}

        self.vmops._resize_up_vdis(instance, vdis)

        mock_sizes.assert_called_once_with(3000)
        expected = [mock.call(self.vmops._session, instance, 4, 2000),
                    mock.call(self.vmops._session, instance, 5, 1000)]
        self.assertEqual(expected, mock_resize.call_args_list)

    @mock.patch.object(vm_utils, 'generate_single_ephemeral')
    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
                                                    mock_generate,
                                                    mock_resize):
        # When the flavor implies more ephemerals than exist, the existing
        # one (userdevice 4) is resized and the missing one (5) generated.
        mock_sizes.return_value = [2000, 1000]
        instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
        ephemerals = {"4": {"ref": 4}}
        vdis = {"ephemerals": ephemerals}

        self.vmops._resize_up_vdis(instance, vdis)

        mock_sizes.assert_called_once_with(3000)
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            4, 2000)
        mock_generate.assert_called_once_with(self.vmops._session, instance,
                                              None, 5, 1000)
+
+
@mock.patch.object(vm_utils, 'remove_old_snapshots')
class CleanupFailedSnapshotTestCase(VMOpsTestBase):
    """post_interrupted_snapshot_cleanup must remove leftover snapshots."""

    def test_post_interrupted_snapshot_cleanup(self, mock_remove):
        self.vmops._get_vm_opaque_ref = mock.Mock()
        self.vmops._get_vm_opaque_ref.return_value = "vm_ref"

        self.vmops.post_interrupted_snapshot_cleanup("context", "instance")

        mock_remove.assert_called_once_with(self.vmops._session,
                "instance", "vm_ref")
+
+
class LiveMigrateHelperTestCase(VMOpsTestBase):
    """Tests for VMOps.connect_block_device_volumes."""

    def test_connect_block_device_volumes_none(self):
        # No block_device_info: an empty sr_uuid -> sr_ref map.
        self.assertEqual({}, self.vmops.connect_block_device_volumes(None))

    @mock.patch.object(volumeops.VolumeOps, "connect_volume")
    def test_connect_block_device_volumes_calls_connect(self, mock_connect):
        # Each mapping's connection_info is connected and the resulting SR
        # uuid resolved to its opaque ref via SR.get_by_uuid.
        with mock.patch.object(self.vmops._session,
                               "call_xenapi") as mock_session:
            mock_connect.return_value = ("sr_uuid", None)
            mock_session.return_value = "sr_ref"
            bdm = {"connection_info": "c_info"}
            bdi = {"block_device_mapping": [bdm]}
            result = self.vmops.connect_block_device_volumes(bdi)

            self.assertEqual({'sr_uuid': 'sr_ref'}, result)

            mock_connect.assert_called_once_with("c_info")
            mock_session.assert_called_once_with("SR.get_by_uuid",
                                                 "sr_uuid")
+
+
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
    """Tests for VMOps._migrate_disk_resizing_down."""

    def test_migrate_disk_resizing_down_works_no_ephemeral(
            self,
            mock_destroy_vdi,
            mock_migrate_vhd,
            mock_resize_disk,
            mock_get_vdi_for_vm_safely,
            mock_update_instance_progress,
            mock_apply_orig_vm_name_label,
            mock_resize_ensure_vm_is_shutdown):

        context = "ctx"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        instance_type = dict(root_gb=1)
        old_vdi_ref = "old_ref"
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"

        mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
        mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)

        self.vmops._migrate_disk_resizing_down(context, instance, dest,
                instance_type, vm_ref, sr_path)

        # Expected sequence: shut down, relabel the original VM, produce a
        # shrunken copy, migrate it, then destroy the temporary copy.
        mock_get_vdi_for_vm_safely.assert_called_once_with(
            self.vmops._session,
            vm_ref)
        mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
            instance, vm_ref)
        mock_apply_orig_vm_name_label.assert_called_once_with(
            instance, vm_ref)
        mock_resize_disk.assert_called_once_with(
            self.vmops._session,
            instance,
            old_vdi_ref,
            instance_type)
        mock_migrate_vhd.assert_called_once_with(
            self.vmops._session,
            instance,
            new_vdi_uuid,
            dest,
            sr_path, 0)
        mock_destroy_vdi.assert_called_once_with(
            self.vmops._session,
            new_vdi_ref)

        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected,
                         mock_update_instance_progress.call_args_list)
+
+
class GetVdisForInstanceTestCase(VMOpsTestBase):
    """Tests get_vdis_for_instance utility method."""

    def setUp(self):
        super(GetVdisForInstanceTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.context.auth_token = 'auth_token'
        self.session = mock.Mock()
        self.vmops._session = self.session
        self.instance = fake_instance.fake_instance_obj(self.context)
        self.name_label = 'name'
        self.image = 'fake_image_id'

    @mock.patch.object(volumeops.VolumeOps, "connect_volume",
                       return_value=("sr", "vdi_uuid"))
    def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
        # setup fake data: a boot-from-volume mapping whose connection
        # info carries a password that must never reach the debug log.
        data = {'name_label': self.name_label,
                'sr_uuid': 'fake',
                'auth_password': 'scrubme'}
        bdm = [{'mount_device': '/dev/vda',
                'connection_info': {'data': data}}]
        bdi = {'root_device_name': 'vda',
               'block_device_mapping': bdm}

        # Tests that the parameters to the to_xml method are sanitized for
        # passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
                fake_debug.matched = True

        fake_debug.matched = False

        with mock.patch.object(vmops.LOG, 'debug',
                               side_effect=fake_debug) as debug_mock:
            vdis = self.vmops._get_vdis_for_instance(self.context,
                    self.instance, self.name_label, self.image,
                    image_type=4, block_device_info=bdi)
            self.assertEqual(1, len(vdis))
            get_uuid_mock.assert_called_once_with({"data": data})
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
            self.assertTrue(fake_debug.matched)
diff --git a/nova/tests/unit/virt/xenapi/test_volume_utils.py b/nova/tests/unit/virt/xenapi/test_volume_utils.py
new file mode 100644
index 0000000000..59fd4626b9
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volume_utils.py
@@ -0,0 +1,232 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import volume_utils
+
+
+class SROps(stubs.XenAPITestBaseNoDB):
+ def test_find_sr_valid_uuid(self):
+ self.session = mock.Mock()
+ self.session.call_xenapi.return_value = 'sr_ref'
+ self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
+ 'sr_uuid'),
+ 'sr_ref')
+
+ def test_find_sr_invalid_uuid(self):
+ class UUIDException(Exception):
+ details = ["UUID_INVALID", "", "", ""]
+
+ self.session = mock.Mock()
+ self.session.XenAPI.Failure = UUIDException
+ self.session.call_xenapi.side_effect = UUIDException
+ self.assertIsNone(
+ volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'))
+
+ def test_find_sr_from_vdi(self):
+ vdi_ref = 'fake-ref'
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
+ vdi_ref)
+
+ def test_find_sr_from_vdi_exception(self):
+ vdi_ref = 'fake-ref'
+
+ class FakeException(Exception):
+ pass
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+ self.assertRaises(exception.StorageError,
+ volume_utils.find_sr_from_vdi, session, vdi_ref)
+
+
+class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
+ def test_target_host(self):
+ self.assertEqual(volume_utils._get_target_host('host:port'),
+ 'host')
+
+ self.assertEqual(volume_utils._get_target_host('host'),
+ 'host')
+
+ # There is no default value
+ self.assertIsNone(volume_utils._get_target_host(':port'))
+
+ self.assertIsNone(volume_utils._get_target_host(None))
+
+ def test_target_port(self):
+ self.assertEqual(volume_utils._get_target_port('host:port'),
+ 'port')
+
+ self.assertEqual(volume_utils._get_target_port('host'),
+ '3260')
+
+
+class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
+    """Tests for volume_utils.introduce_vdi retry behaviour."""
+
+    @mock.patch.object(volume_utils, '_get_vdi_ref')
+    @mock.patch.object(greenthread, 'sleep')
+    def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
+        # The VDI is missing on the first lookup but appears on the second;
+        # introduce_vdi must sleep once and retry rather than fail.
+        def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
+            fake_get_vdi_ref.call_count += 1
+            if fake_get_vdi_ref.call_count == 2:
+                return 'vdi_ref'
+
+        def fake_call_xenapi(method, *args):
+            if method == 'SR.scan':
+                return
+            elif method == 'VDI.get_record':
+                return {'managed': 'true'}
+
+        session = mock.Mock()
+        session.call_xenapi.side_effect = fake_call_xenapi
+
+        mock_get_vdi_ref.side_effect = fake_get_vdi_ref
+        fake_get_vdi_ref.call_count = 0
+
+        self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
+                         'vdi_ref')
+        # Exactly one back-off sleep between the two lookup attempts.
+        mock_sleep.assert_called_once_with(20)
+
+    @mock.patch.object(volume_utils, '_get_vdi_ref')
+    @mock.patch.object(greenthread, 'sleep')
+    def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
+        # If the VDI never shows up, introduce_vdi raises StorageError.
+        def fake_call_xenapi(method, *args):
+            if method == 'SR.scan':
+                return
+            elif method == 'VDI.get_record':
+                return {'managed': 'true'}
+
+        session = mock.Mock()
+        session.call_xenapi.side_effect = fake_call_xenapi
+        mock_get_vdi_ref.return_value = None
+
+        self.assertRaises(exception.StorageError,
+                          volume_utils.introduce_vdi, session, 'sr_ref')
+        mock_sleep.assert_called_once_with(20)
+
+
+class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
+ def test_mountpoint_to_number(self):
+ cases = {
+ 'sda': 0,
+ 'sdp': 15,
+ 'hda': 0,
+ 'hdp': 15,
+ 'vda': 0,
+ 'xvda': 0,
+ '0': 0,
+ '10': 10,
+ 'vdq': -1,
+ 'sdq': -1,
+ 'hdq': -1,
+ 'xvdq': -1,
+ }
+
+ for (input, expected) in cases.iteritems():
+ actual = volume_utils._mountpoint_to_number(input)
+ self.assertEqual(actual, expected,
+ '%s yielded %s, not %s' % (input, actual, expected))
+
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_parse_volume_info_parsing_auth_details(self):
+ conn_info = self._make_connection_info()
+ result = volume_utils._parse_volume_info(conn_info['data'])
+
+ self.assertEqual('username', result['chapuser'])
+ self.assertEqual('password', result['chappassword'])
+
+ def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
+ self.assertRaises(
+ exception.StorageError,
+ volume_utils.get_device_number,
+ 'dev/sd')
+
+
+class FindVBDTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_vbd_by_number_works(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "1"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertEqual("a", result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
+
+ def test_find_vbd_by_number_no_matches(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "3"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ expected = [mock.call("a"), mock.call("b")]
+ self.assertEqual(expected,
+ session.VBD.get_userdevice.call_args_list)
+
+ def test_find_vbd_by_number_no_vbds(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = []
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ self.assertFalse(session.VBD.get_userdevice.called)
+
+ def test_find_vbd_by_number_ignores_exception(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.VM.get_VBDs.return_value = ["a"]
+ session.VBD.get_userdevice.side_effect = test.TestingException
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
diff --git a/nova/tests/unit/virt/xenapi/test_volumeops.py b/nova/tests/unit/virt/xenapi/test_volumeops.py
new file mode 100644
index 0000000000..0e840bb209
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volumeops.py
@@ -0,0 +1,549 @@
+# Copyright (c) 2012 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
+class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
+    """Common fixture: a VolumeOps instance backed by a fake session."""
+
+    def setUp(self):
+        super(VolumeOpsTestBase, self).setUp()
+        self._setup_mock_volumeops()
+
+    def _setup_mock_volumeops(self):
+        # The fake session records calls instead of talking to a host.
+        self.session = stubs.FakeSessionForVolumeTests('fake_uri')
+        self.ops = volumeops.VolumeOps(self.session)
+
+
+class VolumeDetachTestCase(VolumeOpsTestBase):
+    """Tests for VolumeOps volume detach paths."""
+
+    def test_detach_volume_call(self):
+        # mox-based ordering test: find_sr_from_vbd must run BEFORE
+        # destroy_vbd, since the SR can no longer be found afterwards.
+        registered_calls = []
+
+        def regcall(label):
+            def side_effect(*args, **kwargs):
+                registered_calls.append(label)
+            return side_effect
+
+        ops = volumeops.VolumeOps('session')
+        self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
+        self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
+        self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
+        self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+        self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+        self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+        self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+        self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+        volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
+            'vmref')
+
+        volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+            'devnumber')
+
+        volumeops.volume_utils.find_vbd_by_number(
+            'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+        # VM is running, so the VBD must be unplugged before destruction.
+        volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
+            False)
+
+        volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
+
+        volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
+            regcall('destroy_vbd'))
+
+        volumeops.volume_utils.find_sr_from_vbd(
+            'session', 'vbdref').WithSideEffects(
+                regcall('find_sr_from_vbd')).AndReturn('srref')
+
+        volumeops.volume_utils.purge_sr('session', 'srref')
+
+        self.mox.ReplayAll()
+
+        ops.detach_volume(
+            dict(driver_volume_type='iscsi', data='conn_data'),
+            'instance_1', 'mountpoint')
+
+        self.assertEqual(
+            ['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
+
+    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+    @mock.patch.object(volume_utils, "find_vbd_by_number")
+    @mock.patch.object(vm_utils, "vm_ref_or_raise")
+    def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
+        # /dev/xvdd maps to device number 3 on the located VM.
+        mock_vm.return_value = "vm_ref"
+        mock_vbd.return_value = "vbd_ref"
+
+        self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+        mock_vm.assert_called_once_with(self.session, "name")
+        mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
+        mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
+
+    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+    @mock.patch.object(volume_utils, "find_vbd_by_number")
+    @mock.patch.object(vm_utils, "vm_ref_or_raise")
+    def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
+                                                   mock_detach):
+        # No VBD found for the device: detach is silently skipped.
+        mock_vm.return_value = "vm_ref"
+        mock_vbd.return_value = None
+
+        self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+        self.assertFalse(mock_detach.called)
+
+    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+    @mock.patch.object(volume_utils, "find_vbd_by_number")
+    @mock.patch.object(vm_utils, "vm_ref_or_raise")
+    def test_detach_volume_raises(self, mock_vm, mock_vbd,
+                                  mock_detach):
+        # Errors during VBD lookup propagate; nothing gets detached.
+        mock_vm.return_value = "vm_ref"
+        mock_vbd.side_effect = test.TestingException
+
+        self.assertRaises(test.TestingException,
+                          self.ops.detach_volume, {}, "name", "/dev/xvdd")
+        self.assertFalse(mock_detach.called)
+
+    @mock.patch.object(volume_utils, "purge_sr")
+    @mock.patch.object(vm_utils, "destroy_vbd")
+    @mock.patch.object(volume_utils, "find_sr_from_vbd")
+    @mock.patch.object(vm_utils, "unplug_vbd")
+    @mock.patch.object(vm_utils, "is_vm_shutdown")
+    def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
+            mock_find_sr, mock_destroy, mock_purge):
+        # Running VM: VBD is unplugged, then destroyed, then the SR purged.
+        mock_shutdown.return_value = False
+        mock_find_sr.return_value = "sr_ref"
+
+        self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
+
+        mock_shutdown.assert_called_once_with(self.session, "vm_ref")
+        mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
+        mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
+        mock_destroy.assert_called_once_with(self.session, "vbd_ref")
+        mock_purge.assert_called_once_with(self.session, "sr_ref")
+
+    @mock.patch.object(volume_utils, "purge_sr")
+    @mock.patch.object(vm_utils, "destroy_vbd")
+    @mock.patch.object(volume_utils, "find_sr_from_vbd")
+    @mock.patch.object(vm_utils, "unplug_vbd")
+    @mock.patch.object(vm_utils, "is_vm_shutdown")
+    def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
+            mock_find_sr, mock_destroy, mock_purge):
+        # Halted VM: no unplug needed; each VBD is destroyed directly.
+        mock_shutdown.return_value = True
+        mock_find_sr.return_value = "sr_ref"
+
+        self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
+
+        expected = [mock.call(self.session, "vbd_ref_1"),
+                    mock.call(self.session, "vbd_ref_2")]
+        self.assertEqual(expected, mock_destroy.call_args_list)
+        mock_purge.assert_called_with(self.session, "sr_ref")
+        self.assertFalse(mock_unplug.called)
+
+    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+    def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
+        mock_get_all.return_value = []
+
+        self.ops.detach_all("vm_ref")
+
+        mock_get_all.assert_called_once_with("vm_ref")
+        self.assertFalse(mock_detach.called)
+
+    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+    def test_detach_all_volumes(self, mock_get_all, mock_detach):
+        mock_get_all.return_value = ["1"]
+
+        self.ops.detach_all("vm_ref")
+
+        mock_get_all.assert_called_once_with("vm_ref")
+        mock_detach.assert_called_once_with("vm_ref", ["1"])
+
+    def test_get_all_volume_vbd_refs_no_vbds(self):
+        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+            with mock.patch.object(self.session.VBD,
+                                   "get_other_config") as mock_conf:
+                mock_get.return_value = []
+
+                result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+                self.assertEqual([], list(result))
+                mock_get.assert_called_once_with("vm_ref")
+                self.assertFalse(mock_conf.called)
+
+    def test_get_all_volume_vbd_refs_no_volumes(self):
+        # VBDs exist but none carry the 'osvol' other_config marker.
+        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+            with mock.patch.object(self.session.VBD,
+                                   "get_other_config") as mock_conf:
+                mock_get.return_value = ["1"]
+                mock_conf.return_value = {}
+
+                result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+                self.assertEqual([], list(result))
+                mock_get.assert_called_once_with("vm_ref")
+                mock_conf.assert_called_once_with("1")
+
+    def test_get_all_volume_vbd_refs_with_volumes(self):
+        # 'osvol' in other_config marks a VBD as a Cinder volume.
+        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+            with mock.patch.object(self.session.VBD,
+                                   "get_other_config") as mock_conf:
+                mock_get.return_value = ["1", "2"]
+                mock_conf.return_value = {"osvol": True}
+
+                result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+                self.assertEqual(["1", "2"], list(result))
+                mock_get.assert_called_once_with("vm_ref")
+
+
+class AttachVolumeTestCase(VolumeOpsTestBase):
+    """Tests for VolumeOps volume attach paths."""
+
+    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+    @mock.patch.object(vm_utils, "vm_ref_or_raise")
+    def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
+        # /dev/xvda maps to device number 0; hotplug defaults to True.
+        mock_get_vm.return_value = "vm_ref"
+
+        self.ops.attach_volume({}, "instance_name", "/dev/xvda")
+
+        mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+                                            True)
+
+    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+    @mock.patch.object(vm_utils, "vm_ref_or_raise")
+    def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
+        # Explicit hotplug=False is forwarded to _attach_volume.
+        mock_get_vm.return_value = "vm_ref"
+
+        self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
+
+        mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+                                            False)
+
+    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+    def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
+        # connect_volume only plumbs the SR/VDI; no VM is involved.
+        self.ops.connect_volume({})
+        mock_attach.assert_called_once_with({})
+
+    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+    def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
+                                         mock_provider, mock_driver):
+        # With no vm_ref the volume is connected but never attached to a VM.
+        connection_info = {"data": {}}
+        with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+            mock_provider.return_value = ("sr_ref", "sr_uuid")
+            mock_vdi.return_value = "vdi_uuid"
+
+            result = self.ops._attach_volume(connection_info)
+
+            self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
+
+        mock_driver.assert_called_once_with(connection_info)
+        mock_provider.assert_called_once_with({}, None)
+        mock_hypervisor.assert_called_once_with("sr_ref", {})
+        self.assertFalse(mock_attach.called)
+
+    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+    def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
+                                           mock_provider, mock_driver):
+        # Full path: driver check -> SR connect -> VDI introduce -> VBD attach.
+        connection_info = {"data": {}}
+        with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+            mock_provider.return_value = ("sr_ref", "sr_uuid")
+            mock_hypervisor.return_value = "vdi_ref"
+            mock_vdi.return_value = "vdi_uuid"
+
+            result = self.ops._attach_volume(connection_info, "vm_ref",
+                                             "name", 2, True)
+
+            self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
+
+        mock_driver.assert_called_once_with(connection_info)
+        mock_provider.assert_called_once_with({}, "name")
+        mock_hypervisor.assert_called_once_with("sr_ref", {})
+        mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
+                                            True)
+
+    @mock.patch.object(volume_utils, "forget_sr")
+    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+    def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
+                                   mock_provider, mock_driver, mock_forget):
+        # If introducing the VDI fails, the freshly connected SR must be
+        # forgotten again (no half-attached leftovers) and the error raised.
+        connection_info = {"data": {}}
+        mock_provider.return_value = ("sr_ref", "sr_uuid")
+        mock_hypervisor.side_effect = test.TestingException
+
+        self.assertRaises(test.TestingException,
+                          self.ops._attach_volume, connection_info)
+
+        mock_driver.assert_called_once_with(connection_info)
+        mock_provider.assert_called_once_with({}, None)
+        mock_hypervisor.assert_called_once_with("sr_ref", {})
+        mock_forget.assert_called_once_with(self.session, "sr_ref")
+        self.assertFalse(mock_attach.called)
+
+    def test_check_is_supported_driver_type_pass_iscsi(self):
+        conn_info = {"driver_volume_type": "iscsi"}
+        self.ops._check_is_supported_driver_type(conn_info)
+
+    def test_check_is_supported_driver_type_pass_xensm(self):
+        conn_info = {"driver_volume_type": "xensm"}
+        self.ops._check_is_supported_driver_type(conn_info)
+
+    def test_check_is_supported_driver_type_pass_bad(self):
+        # Only iscsi and xensm are supported driver types.
+        conn_info = {"driver_volume_type": "bad"}
+        self.assertRaises(exception.VolumeDriverNotFound,
+                          self.ops._check_is_supported_driver_type, conn_info)
+
+    @mock.patch.object(volume_utils, "introduce_sr")
+    @mock.patch.object(volume_utils, "find_sr_by_uuid")
+    @mock.patch.object(volume_utils, "parse_sr_info")
+    def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
+                                               mock_introduce_sr):
+        # Unknown SR uuid: a new SR is introduced to the host.
+        mock_parse.return_value = ("uuid", "label", "params")
+        mock_find_sr.return_value = None
+        mock_introduce_sr.return_value = "sr_ref"
+
+        ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+        self.assertEqual("sr_ref", ref)
+        self.assertEqual("uuid", uuid)
+        mock_parse.assert_called_once_with({}, "Disk-for:name")
+        mock_find_sr.assert_called_once_with(self.session, "uuid")
+        mock_introduce_sr.assert_called_once_with(self.session, "uuid",
+                                                  "label", "params")
+
+    @mock.patch.object(volume_utils, "introduce_sr")
+    @mock.patch.object(volume_utils, "find_sr_by_uuid")
+    @mock.patch.object(volume_utils, "parse_sr_info")
+    def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
+                                               mock_introduce_sr):
+        # SR already present: it is reused, not introduced again.
+        mock_parse.return_value = ("uuid", "label", "params")
+        mock_find_sr.return_value = "sr_ref"
+
+        ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+        self.assertEqual("sr_ref", ref)
+        self.assertEqual("uuid", uuid)
+        mock_parse.assert_called_once_with({}, "Disk-for:name")
+        mock_find_sr.assert_called_once_with(self.session, "uuid")
+        self.assertFalse(mock_introduce_sr.called)
+
+    @mock.patch.object(volume_utils, "introduce_vdi")
+    def test_connect_hypervisor_to_volume_regular(self, mock_intro):
+        mock_intro.return_value = "vdi"
+
+        result = self.ops._connect_hypervisor_to_volume("sr", {})
+
+        self.assertEqual("vdi", result)
+        mock_intro.assert_called_once_with(self.session, "sr")
+
+    @mock.patch.object(volume_utils, "introduce_vdi")
+    def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
+        # A vdi_uuid in the connection data is forwarded to introduce_vdi.
+        mock_intro.return_value = "vdi"
+
+        conn = {"vdi_uuid": "id"}
+        result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+        self.assertEqual("vdi", result)
+        mock_intro.assert_called_once_with(self.session, "sr",
+                                           vdi_uuid="id")
+
+    @mock.patch.object(volume_utils, "introduce_vdi")
+    def test_connect_hypervisor_to_volume_lun(self, mock_intro):
+        # A target_lun in the connection data is forwarded to introduce_vdi.
+        mock_intro.return_value = "vdi"
+
+        conn = {"target_lun": "lun"}
+        result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+        self.assertEqual("vdi", result)
+        mock_intro.assert_called_once_with(self.session, "sr",
+                                           target_lun="lun")
+
+    @mock.patch.object(vm_utils, "is_vm_shutdown")
+    @mock.patch.object(vm_utils, "create_vbd")
+    def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
+        # Running VM + hotplug: the new VBD must also be plugged.
+        mock_vbd.return_value = "vbd"
+        mock_shutdown.return_value = False
+
+        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+            self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+            mock_plug.assert_called_once_with("vbd", "vm")
+
+        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+                                         bootable=False, osvol=True)
+        mock_shutdown.assert_called_once_with(self.session, "vm")
+
+    @mock.patch.object(vm_utils, "is_vm_shutdown")
+    @mock.patch.object(vm_utils, "create_vbd")
+    def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
+        # Halted VM: the VBD is created but not plugged.
+        mock_vbd.return_value = "vbd"
+        mock_shutdown.return_value = True
+
+        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+            self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+            self.assertFalse(mock_plug.called)
+
+        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+                                         bootable=False, osvol=True)
+        mock_shutdown.assert_called_once_with(self.session, "vm")
+
+    @mock.patch.object(vm_utils, "is_vm_shutdown")
+    @mock.patch.object(vm_utils, "create_vbd")
+    def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
+        # hotplug=False: no plug and no need to check VM power state.
+        mock_vbd.return_value = "vbd"
+
+        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+            self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
+            self.assertFalse(mock_plug.called)
+
+        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+                                         bootable=False, osvol=True)
+        self.assertFalse(mock_shutdown.called)
+
+
+class FindBadVolumeTestCase(VolumeOpsTestBase):
+    """Tests for VolumeOps.find_bad_volumes (SR scan failure detection)."""
+
+    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+    def test_find_bad_volumes_no_vbds(self, mock_get_all):
+        mock_get_all.return_value = []
+
+        result = self.ops.find_bad_volumes("vm_ref")
+
+        mock_get_all.assert_called_once_with("vm_ref")
+        self.assertEqual([], result)
+
+    @mock.patch.object(volume_utils, "find_sr_from_vbd")
+    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+    def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
+        # Every SR scan succeeds, so no devices are reported bad.
+        mock_get_all.return_value = ["1", "2"]
+        mock_find_sr.return_value = "sr_ref"
+
+        with mock.patch.object(self.session.SR, "scan") as mock_scan:
+            result = self.ops.find_bad_volumes("vm_ref")
+
+            mock_get_all.assert_called_once_with("vm_ref")
+            expected_find = [mock.call(self.session, "1"),
+                             mock.call(self.session, "2")]
+            self.assertEqual(expected_find, mock_find_sr.call_args_list)
+            expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
+            self.assertEqual(expected_scan, mock_scan.call_args_list)
+            self.assertEqual([], result)
+
+    @mock.patch.object(volume_utils, "find_sr_from_vbd")
+    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+    def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
+        mock_get_all.return_value = ["vbd_ref"]
+        mock_find_sr.return_value = "sr_ref"
+
+        # SR_BACKEND_FAILURE_40 marks an SR whose backing device is gone;
+        # its VBD's device should come back as a bad volume.
+        class FakeException(Exception):
+            details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+        # Swap in a mock session whose XenAPI.Failure is our fake exception.
+        session = mock.Mock()
+        session.XenAPI.Failure = FakeException
+        self.ops._session = session
+
+        with mock.patch.object(session.SR, "scan") as mock_scan:
+            with mock.patch.object(session.VBD,
+                                   "get_device") as mock_get:
+                mock_scan.side_effect = FakeException
+                mock_get.return_value = "xvdb"
+
+                result = self.ops.find_bad_volumes("vm_ref")
+
+                mock_get_all.assert_called_once_with("vm_ref")
+                mock_scan.assert_called_once_with("sr_ref")
+                mock_get.assert_called_once_with("vbd_ref")
+                self.assertEqual(["/dev/xvdb"], result)
+
+    @mock.patch.object(volume_utils, "find_sr_from_vbd")
+    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+    def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
+        mock_get_all.return_value = ["vbd_ref"]
+        mock_find_sr.return_value = "sr_ref"
+
+        # Any failure other than SR_BACKEND_FAILURE_40 must propagate.
+        class FakeException(Exception):
+            details = ['foo', "", "", ""]
+
+        session = mock.Mock()
+        session.XenAPI.Failure = FakeException
+        self.ops._session = session
+
+        with mock.patch.object(session.SR, "scan") as mock_scan:
+            with mock.patch.object(session.VBD,
+                                   "get_device") as mock_get:
+                mock_scan.side_effect = FakeException
+                mock_get.return_value = "xvdb"
+
+                self.assertRaises(FakeException,
+                                  self.ops.find_bad_volumes, "vm_ref")
+                mock_scan.assert_called_once_with("sr_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+
+
+class CleanupFromVDIsTestCase(VolumeOpsTestBase):
+ def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs):
+ find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
+ in vdi_refs]
+ find_sr_from_vdi.assert_has_calls(find_sr_calls)
+ purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
+ in sr_refs]
+ purge_sr.assert_has_calls(purge_sr_calls)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi',
+ side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref2']
+ find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
+ sr_refs[0]]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ purge_sr.side_effects = [test.TestingException, None]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py
new file mode 100644
index 0000000000..c90f8c2f63
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_xenapi.py
@@ -0,0 +1,4105 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for XenAPI."""
+
+import ast
+import base64
+import contextlib
+import copy
+import functools
+import os
+import re
+
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import flavors
+from nova.compute import hvtype
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_processutils
+import nova.tests.unit.image.fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_aggregate
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import host
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import pool
+from nova.virt.xenapi import pool_states
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('network_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
+CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
+ group="xenserver")
+
+# Glance image-id constants used by the spawn tests; each id keys an
+# entry in IMAGE_FIXTURES below.
+IMAGE_MACHINE = '1'
+IMAGE_KERNEL = '2'
+IMAGE_RAMDISK = '3'
+IMAGE_RAW = '4'
+IMAGE_VHD = '5'
+IMAGE_ISO = '6'
+IMAGE_IPXE_ISO = '7'
+IMAGE_FROM_VOLUME = '8'
+
+# Fake image metadata registered with the fake image service by
+# set_image_fixtures(); disk_format/container_format drive the code paths
+# exercised (ami/aki/ari, raw, vhd, iso, ipxe-enabled iso).
+IMAGE_FIXTURES = {
+    IMAGE_MACHINE: {
+        'image_meta': {'name': 'fakemachine', 'size': 0,
+                       'disk_format': 'ami',
+                       'container_format': 'ami'},
+    },
+    IMAGE_KERNEL: {
+        'image_meta': {'name': 'fakekernel', 'size': 0,
+                       'disk_format': 'aki',
+                       'container_format': 'aki'},
+    },
+    IMAGE_RAMDISK: {
+        'image_meta': {'name': 'fakeramdisk', 'size': 0,
+                       'disk_format': 'ari',
+                       'container_format': 'ari'},
+    },
+    IMAGE_RAW: {
+        'image_meta': {'name': 'fakeraw', 'size': 0,
+                       'disk_format': 'raw',
+                       'container_format': 'bare'},
+    },
+    IMAGE_VHD: {
+        'image_meta': {'name': 'fakevhd', 'size': 0,
+                       'disk_format': 'vhd',
+                       'container_format': 'ovf'},
+    },
+    IMAGE_ISO: {
+        'image_meta': {'name': 'fakeiso', 'size': 0,
+                       'disk_format': 'iso',
+                       'container_format': 'bare'},
+    },
+    IMAGE_IPXE_ISO: {
+        'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
+                       'disk_format': 'iso',
+                       'container_format': 'bare',
+                       'properties': {'ipxe_boot': 'true'}},
+    },
+    IMAGE_FROM_VOLUME: {
+        'image_meta': {'name': 'fake_ipxe_iso',
+                       'properties': {'foo': 'bar'}},
+    },
+}
+
+
+def get_session():
+    """Return a fresh XenAPISession against the fake test endpoint."""
+    return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
+
+
+def set_image_fixtures():
+    """Reset the fake image service and register IMAGE_FIXTURES entries."""
+    image_service = fake_image.FakeImageService()
+    image_service.images.clear()
+    for image_id, image_meta in IMAGE_FIXTURES.items():
+        image_meta = image_meta['image_meta']
+        image_meta['id'] = image_id
+        image_service.create(None, image_meta)
+
+
+def get_fake_device_info():
+    """Build a fake block_device_info dict with one iSCSI root volume."""
+    # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
+    # can be removed from the dict when LP bug #1087308 is fixed
+    fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
+    fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
+    fake = {'block_device_mapping':
+                [{'connection_info': {'driver_volume_type': 'iscsi',
+                                      'data': {'sr_uuid': 'falseSR',
+                                               'introduce_sr_keys': ['sr_type'],
+                                               'sr_type': 'iscsi',
+                                               'vdi_uuid': fake_vdi_uuid,
+                                               'target_discovered': False,
+                                               'target_iqn': 'foo_iqn:foo_volid',
+                                               'target_portal': 'localhost:3260',
+                                               'volume_id': 'foo_volid',
+                                               'target_lun': 1,
+                                               'auth_password': 'my-p@55w0rd',
+                                               'auth_username': 'johndoe',
+                                               'auth_method': u'CHAP'}, },
+                  'mount_device': 'vda',
+                  'delete_on_termination': False}, ],
+            'root_device_name': '/dev/sda',
+            'ephemerals': [],
+            'swap': None, }
+    return fake
+
+
+def stub_vm_utils_with_vdi_attached_here(function):
+    """vm_utils.with_vdi_attached_here needs to be stubbed out because it
+    calls down to the filesystem to attach a vdi. This provides a
+    decorator to handle that.
+
+    The stubs are installed before the wrapped test runs and restored in a
+    finally block so they never leak into other tests.
+    """
+    @functools.wraps(function)
+    def decorated_function(self, *args, **kwargs):
+        @contextlib.contextmanager
+        def fake_vdi_attached_here(*args, **kwargs):
+            # Pretend the VDI is attached as a fake device node.
+            fake_dev = 'fakedev'
+            yield fake_dev
+
+        def fake_image_download(*args, **kwargs):
+            pass
+
+        orig_vdi_attached_here = vm_utils.vdi_attached_here
+        orig_image_download = fake_image._FakeImageService.download
+        try:
+            vm_utils.vdi_attached_here = fake_vdi_attached_here
+            fake_image._FakeImageService.download = fake_image_download
+            return function(self, *args, **kwargs)
+        finally:
+            fake_image._FakeImageService.download = orig_image_download
+            vm_utils.vdi_attached_here = orig_vdi_attached_here
+
+    return decorated_function
+
+
+def get_create_system_metadata(context, instance_type_id):
+    """Return flavor-derived system_metadata for the given flavor id."""
+    flavor = db.flavor_get(context, instance_type_id)
+    return flavors.save_flavor_info({}, flavor)
+
+
+def create_instance_with_system_metadata(context, instance_values):
+    """Create a DB instance, filling in flavor system_metadata first."""
+    instance_values['system_metadata'] = get_create_system_metadata(
+        context, instance_values['instance_type_id'])
+    instance_values['pci_devices'] = []
+    return db.instance_create(context, instance_values)
+
+
+class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
+    """Unit tests for Volume operations."""
+    def setUp(self):
+        super(XenAPIVolumeTestCase, self).setUp()
+        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+        self.fixture.config(disable_process_locking=True,
+                            group='oslo_concurrency')
+        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+                                   'Dom0IptablesFirewallDriver')
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+
+        self.instance = fake_instance.fake_db_instance(name='foo')
+
+    @classmethod
+    def _make_connection_info(cls):
+        # Minimal iSCSI connection_info dict accepted by attach_volume.
+        target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+        return {'driver_volume_type': 'iscsi',
+                'data': {'volume_id': 1,
+                         'target_iqn': target_iqn,
+                         'target_portal': '127.0.0.1:3260,fake',
+                         'target_lun': None,
+                         'auth_method': 'CHAP',
+                         'auth_username': 'username',
+                         'auth_password': 'password'}}
+
+    def test_attach_volume(self):
+        # This shows how to test Ops classes' methods.
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
+        conn_info = self._make_connection_info()
+        self.assertIsNone(
+            conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
+
+        # check that the VM has a VBD attached to it
+        # Get XenAPI record for VBD
+        vbds = xenapi_fake.get_all('VBD')
+        vbd = xenapi_fake.get_record('VBD', vbds[0])
+        vm_ref = vbd['VM']
+        self.assertEqual(vm_ref, vm)
+
+    def test_attach_volume_raise_exception(self):
+        # This shows how to test when exceptions are raised.
+        stubs.stubout_session(self.stubs,
+                              stubs.FakeSessionForVolumeFailedTests)
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        xenapi_fake.create_vm(self.instance['name'], 'Running')
+        # An unknown driver_volume_type must surface as VolumeDriverNotFound.
+        self.assertRaises(exception.VolumeDriverNotFound,
+                          conn.attach_volume,
+                          None, {'driver_volume_type': 'nonexist'},
+                          self.instance, '/dev/sdc')
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIVMTestCase(stubs.XenAPITestBase):
+ """Unit tests for VM operations."""
+    def setUp(self):
+        # Wire up the fake XenAPI session, fake DB, fake networking, fake
+        # image service and fake processutils so the driver runs entirely
+        # against in-memory fakes.
+        super(XenAPIVMTestCase, self).setUp()
+        self.useFixture(test.SampleNetworks())
+        self.network = importutils.import_object(CONF.network_manager)
+        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+        self.fixture.config(disable_process_locking=True,
+                            group='oslo_concurrency')
+        self.flags(instance_name_template='%d',
+                   firewall_driver='nova.virt.xenapi.firewall.'
+                                   'Dom0IptablesFirewallDriver')
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+        db_fakes.stub_out_db_instance_api(self.stubs)
+        xenapi_fake.create_network('fake', 'fake_br1')
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        stubs.stubout_get_this_vm_uuid(self.stubs)
+        stubs.stub_out_vm_methods(self.stubs)
+        fake_processutils.stub_out_processutils_execute(self.stubs)
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        self.conn._session.is_local_connection = False
+
+        fake_image.stub_out_image_service(self.stubs)
+        set_image_fixtures()
+        stubs.stubout_image_service_download(self.stubs)
+        stubs.stubout_stream_disk(self.stubs)
+
+        # Metadata injection touches xenstore; make it a no-op here.
+        def fake_inject_instance_metadata(self, instance, vm):
+            pass
+        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+                       fake_inject_instance_metadata)
+
+        def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
+            name_label = "fakenamelabel"
+            disk_type = "fakedisktype"
+            virtual_size = 777
+            return vm_utils.create_vdi(
+                session, sr_ref, instance, name_label, disk_type,
+                virtual_size)
+        self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
+
+    def tearDown(self):
+        # Reset the shared fake image service so fixtures don't leak
+        # between tests.
+        fake_image.FakeImageService_reset()
+        super(XenAPIVMTestCase, self).tearDown()
+
+    def test_init_host(self):
+        # init_host should detach only VBDs whose VDI is tagged with a
+        # nova_instance_uuid (leftover attachments from a previous run).
+        session = get_session()
+        vm = vm_utils._get_this_vm_ref(session)
+        # Local root disk
+        vdi0 = xenapi_fake.create_vdi('compute', None)
+        vbd0 = xenapi_fake.create_vbd(vm, vdi0)
+        # Instance VDI
+        vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
+                                      other_config={'nova_instance_uuid': 'aaaa'})
+        xenapi_fake.create_vbd(vm, vdi1)
+        # Only looks like instance VDI
+        vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
+        vbd2 = xenapi_fake.create_vbd(vm, vdi2)
+
+        self.conn.init_host(None)
+        self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
+
+    def test_instance_exists(self):
+        # A truthy vm_utils.lookup result means the instance exists.
+        self.mox.StubOutWithMock(vm_utils, 'lookup')
+        vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
+        self.mox.ReplayAll()
+
+        self.stubs.Set(objects.Instance, 'name', 'foo')
+        instance = objects.Instance(uuid='fake-uuid')
+        self.assertTrue(self.conn.instance_exists(instance))
+
+    def test_instance_not_exists(self):
+        # A None lookup result means the instance does not exist.
+        self.mox.StubOutWithMock(vm_utils, 'lookup')
+        vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
+        self.mox.ReplayAll()
+
+        self.stubs.Set(objects.Instance, 'name', 'bar')
+        instance = objects.Instance(uuid='fake-uuid')
+        self.assertFalse(self.conn.instance_exists(instance))
+
+    def test_list_instances_0(self):
+        # With no VMs created, list_instances is empty.
+        instances = self.conn.list_instances()
+        self.assertEqual(instances, [])
+
+    def test_list_instance_uuids_0(self):
+        # With no VMs created, list_instance_uuids is empty.
+        instance_uuids = self.conn.list_instance_uuids()
+        self.assertEqual(instance_uuids, [])
+
+    def test_list_instance_uuids(self):
+        # The driver reports the uuid of every spawned instance.
+        uuids = []
+        for x in xrange(1, 4):
+            instance = self._create_instance(x)
+            uuids.append(instance['uuid'])
+        instance_uuids = self.conn.list_instance_uuids()
+        self.assertEqual(len(uuids), len(instance_uuids))
+        self.assertEqual(set(uuids), set(instance_uuids))
+
+    def test_get_rrd_server(self):
+        # The RRD server scheme/address are parsed from connection_url.
+        self.flags(connection_url='myscheme://myaddress/',
+                   group='xenserver')
+        server_info = vm_utils._get_rrd_server()
+        self.assertEqual(server_info[0], 'myscheme')
+        self.assertEqual(server_info[1], 'myaddress')
+
+    # Values expected when parsing the vm_rrd.xml fixture in
+    # test_get_diagnostics below.
+    expected_raw_diagnostics = {
+        'vbd_xvdb_write': '0.0',
+        'memory_target': '4294967296.0000',
+        'memory_internal_free': '1415564.0000',
+        'memory': '4294967296.0000',
+        'vbd_xvda_write': '0.0',
+        'cpu0': '0.0042',
+        'vif_0_tx': '287.4134',
+        'vbd_xvda_read': '0.0',
+        'vif_0_rx': '1816.0144',
+        'vif_2_rx': '0.0',
+        'vif_2_tx': '0.0',
+        'vbd_xvdb_read': '0.0',
+        'last_update': '1328795567',
+    }
+
+    def test_get_diagnostics(self):
+        # Feed the canned vm_rrd.xml fixture (whitespace stripped) in place
+        # of a live RRD fetch and compare the parsed raw diagnostics.
+        def fake_get_rrd(host, vm_uuid):
+            path = os.path.dirname(os.path.realpath(__file__))
+            with open(os.path.join(path, 'vm_rrd.xml')) as f:
+                return re.sub(r'\s', '', f.read())
+        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+        expected = self.expected_raw_diagnostics
+        instance = self._create_instance()
+        actual = self.conn.get_diagnostics(instance)
+        self.assertThat(actual, matchers.DictMatches(expected))
+
+    def test_get_instance_diagnostics(self):
+        # Same RRD fixture, but through the structured Diagnostics object
+        # API; compare the serialized form.
+        def fake_get_rrd(host, vm_uuid):
+            path = os.path.dirname(os.path.realpath(__file__))
+            with open(os.path.join(path, 'vm_rrd.xml')) as f:
+                return re.sub(r'\s', '', f.read())
+        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+        expected = {
+            'config_drive': False,
+            'state': 'running',
+            'driver': 'xenapi',
+            'version': '1.0',
+            'uptime': 0,
+            'hypervisor_os': None,
+            'cpu_details': [{'time': 0}, {'time': 0},
+                            {'time': 0}, {'time': 0}],
+            'nic_details': [{'mac_address': '00:00:00:00:00:00',
+                             'rx_drop': 0,
+                             'rx_errors': 0,
+                             'rx_octets': 0,
+                             'rx_packets': 0,
+                             'tx_drop': 0,
+                             'tx_errors': 0,
+                             'tx_octets': 0,
+                             'tx_packets': 0}],
+            'disk_details': [{'errors_count': 0,
+                              'id': '',
+                              'read_bytes': 0,
+                              'read_requests': 0,
+                              'write_bytes': 0,
+                              'write_requests': 0}],
+            'memory_details': {'maximum': 8192, 'used': 0}}
+
+        instance = self._create_instance()
+        actual = self.conn.get_instance_diagnostics(instance)
+        self.assertEqual(expected, actual.serialize())
+
+    def test_get_vnc_console(self):
+        # The console path should reference the instance's own VM ref.
+        instance = self._create_instance(obj=True)
+        session = get_session()
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        vm_ref = vm_utils.lookup(session, instance['name'])
+
+        console = conn.get_vnc_console(self.context, instance)
+
+        # Note(sulo): We don't care about session id in test
+        # they will always differ so strip that out
+        actual_path = console.internal_access_path.split('&')[0]
+        expected_path = "/console?ref=%s" % str(vm_ref)
+
+        self.assertEqual(expected_path, actual_path)
+
+    def test_get_vnc_console_for_rescue(self):
+        # A rescued instance's console points at its "-rescue" VM.
+        instance = self._create_instance(obj=True)
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
+                                          'Running')
+        # Set instance state to rescued
+        instance['vm_state'] = 'rescued'
+
+        console = conn.get_vnc_console(self.context, instance)
+
+        # Note(sulo): We don't care about session id in test
+        # they will always differ so strip that out
+        actual_path = console.internal_access_path.split('&')[0]
+        expected_path = "/console?ref=%s" % str(rescue_vm)
+
+        self.assertEqual(expected_path, actual_path)
+
+    def test_get_vnc_console_instance_not_ready(self):
+        # No VM yet (spawn=False) while building -> InstanceNotFound.
+        instance = self._create_instance(obj=True, spawn=False)
+        instance.vm_state = 'building'
+
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(exception.InstanceNotFound,
+                          conn.get_vnc_console, self.context, instance)
+
+    def test_get_vnc_console_rescue_not_ready(self):
+        # Rescued state but no rescue VM yet -> InstanceNotReady.
+        instance = self._create_instance(obj=True, spawn=False)
+        instance.vm_state = 'rescued'
+
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        self.assertRaises(exception.InstanceNotReady,
+                          conn.get_vnc_console, self.context, instance)
+
+    def test_instance_snapshot_fails_with_no_primary_vdi(self):
+        # If no VBD is marked as the primary/bootable root device,
+        # snapshot cannot find a root VDI and must raise.
+        def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
+                           vbd_type='disk', read_only=False, bootable=False,
+                           osvol=False):
+            vbd_rec = {'VM': vm_ref,
+                       'VDI': vdi_ref,
+                       'userdevice': 'fake',
+                       'currently_attached': False}
+            vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
+            xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
+            return vbd_ref
+
+        self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
+        stubs.stubout_instance_snapshot(self.stubs)
+        # Stubbing out firewall driver as previous stub sets alters
+        # xml rpc result parsing
+        stubs.stubout_firewall_driver(self.stubs, self.conn)
+        instance = self._create_instance()
+
+        image_id = "my_snapshot_id"
+        self.assertRaises(exception.NovaException, self.conn.snapshot,
+                          self.context, instance, image_id,
+                          lambda *args, **kwargs: None)
+
+    def test_instance_snapshot(self):
+        # Full snapshot flow: task-state transitions happen in order, the
+        # image is uploaded, and all temporary VMs/VBDs/VDIs are torn down.
+        expected_calls = [
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+            {'args': (),
+             'kwargs':
+                 {'task_state': task_states.IMAGE_UPLOADING,
+                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+        image_id = "my_snapshot_id"
+
+        stubs.stubout_instance_snapshot(self.stubs)
+        stubs.stubout_is_snapshot(self.stubs)
+        # Stubbing out firewall driver as previous stub sets alters
+        # xml rpc result parsing
+        stubs.stubout_firewall_driver(self.stubs, self.conn)
+
+        instance = self._create_instance()
+
+        self.fake_upload_called = False
+
+        def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
+            self.fake_upload_called = True
+            self.assertEqual(ctx, self.context)
+            self.assertEqual(inst, instance)
+            self.assertIsInstance(vdi_uuids, list)
+            self.assertEqual(img_id, image_id)
+
+        self.stubs.Set(glance.GlanceStore, 'upload_image',
+                       fake_image_upload)
+
+        self.conn.snapshot(self.context, instance, image_id,
+                           func_call_matcher.call)
+
+        # Ensure VM was torn down
+        vm_labels = []
+        for vm_ref in xenapi_fake.get_all('VM'):
+            vm_rec = xenapi_fake.get_record('VM', vm_ref)
+            if not vm_rec["is_control_domain"]:
+                vm_labels.append(vm_rec["name_label"])
+
+        self.assertEqual(vm_labels, [instance['name']])
+
+        # Ensure VBDs were torn down
+        vbd_labels = []
+        for vbd_ref in xenapi_fake.get_all('VBD'):
+            vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
+            vbd_labels.append(vbd_rec["vm_name_label"])
+
+        self.assertEqual(vbd_labels, [instance['name']])
+
+        # Ensure task states changed in correct order
+        self.assertIsNone(func_call_matcher.match())
+
+        # Ensure VDIs were torn down
+        for vdi_ref in xenapi_fake.get_all('VDI'):
+            vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+            name_label = vdi_rec["name_label"]
+            self.assertFalse(name_label.endswith('snapshot'))
+
+        self.assertTrue(self.fake_upload_called)
+
+    def create_vm_record(self, conn, os_type, name):
+        # Capture the Nova info dict and the (single) non-dom0 XenAPI VM
+        # record on self for later assertions in check_vm_record().
+        instances = conn.list_instances()
+        self.assertEqual(instances, [name])
+
+        # Get Nova record for VM
+        vm_info = conn.get_info({'name': name})
+        # Get XenAPI record for VM
+        vms = [rec for ref, rec
+               in xenapi_fake.get_all_records('VM').iteritems()
+               if not rec['is_control_domain']]
+        vm = vms[0]
+        self.vm_info = vm_info
+        self.vm = vm
+
+    def check_vm_record(self, conn, instance_type_id, check_injection):
+        # Verify memory/vcpu sizing from the flavor and, optionally, the
+        # network data injected into xenstore.
+        # NOTE(review): 'conn' is passed straight to db.flavor_get as the
+        # context argument — callers pass the driver here; presumably the
+        # fake DB layer ignores it. TODO confirm.
+        flavor = db.flavor_get(conn, instance_type_id)
+        mem_kib = long(flavor['memory_mb']) << 10
+        mem_bytes = str(mem_kib << 10)
+        vcpus = flavor['vcpus']
+        vcpu_weight = flavor['vcpu_weight']
+
+        self.assertEqual(self.vm_info['max_mem'], mem_kib)
+        self.assertEqual(self.vm_info['mem'], mem_kib)
+        self.assertEqual(self.vm['memory_static_max'], mem_bytes)
+        self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
+        self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
+        self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
+        self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
+        if vcpu_weight is None:
+            self.assertEqual(self.vm['VCPUs_params'], {})
+        else:
+            self.assertEqual(self.vm['VCPUs_params'],
+                             {'weight': str(vcpu_weight), 'cap': '0'})
+
+        # Check that the VM is running according to Nova
+        self.assertEqual(self.vm_info['state'], power_state.RUNNING)
+
+        # Check that the VM is running according to XenAPI.
+        self.assertEqual(self.vm['power_state'], 'Running')
+
+        if check_injection:
+            xenstore_data = self.vm['xenstore_data']
+            self.assertNotIn('vm-data/hostname', xenstore_data)
+            key = 'vm-data/networking/DEADBEEF0001'
+            xenstore_value = xenstore_data[key]
+            tcpip_data = ast.literal_eval(xenstore_value)
+            self.assertEqual(tcpip_data,
+                             {'broadcast': '192.168.1.255',
+                              'dns': ['192.168.1.4', '192.168.1.3'],
+                              'gateway': '192.168.1.1',
+                              'gateway_v6': '2001:db8:0:1::1',
+                              'ip6s': [{'enabled': '1',
+                                        'ip': '2001:db8:0:1:dcad:beff:feef:1',
+                                        'netmask': 64,
+                                        'gateway': '2001:db8:0:1::1'}],
+                              'ips': [{'enabled': '1',
+                                       'ip': '192.168.1.100',
+                                       'netmask': '255.255.255.0',
+                                       'gateway': '192.168.1.1'},
+                                      {'enabled': '1',
+                                       'ip': '192.168.1.101',
+                                       'netmask': '255.255.255.0',
+                                       'gateway': '192.168.1.1'}],
+                              'label': 'test1',
+                              'mac': 'DE:AD:BE:EF:00:01'})
+
+    def check_vm_params_for_windows(self):
+        # HVM guest: BIOS boot order set, no PV fields.
+        self.assertEqual(self.vm['platform']['nx'], 'true')
+        self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
+        self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
+
+        # check that these are not set
+        self.assertEqual(self.vm['PV_args'], '')
+        self.assertEqual(self.vm['PV_bootloader'], '')
+        self.assertEqual(self.vm['PV_kernel'], '')
+        self.assertEqual(self.vm['PV_ramdisk'], '')
+
+    def check_vm_params_for_linux(self):
+        # PV guest booted via pygrub: no HVM fields, no external kernel.
+        self.assertEqual(self.vm['platform']['nx'], 'false')
+        self.assertEqual(self.vm['PV_args'], '')
+        self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
+
+        # check that these are not set
+        self.assertEqual(self.vm['PV_kernel'], '')
+        self.assertEqual(self.vm['PV_ramdisk'], '')
+        self.assertEqual(self.vm['HVM_boot_params'], {})
+        self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+    def check_vm_params_for_linux_with_external_kernel(self):
+        # PV guest with explicit kernel/ramdisk images.
+        self.assertEqual(self.vm['platform']['nx'], 'false')
+        self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
+        self.assertNotEqual(self.vm['PV_kernel'], '')
+        self.assertNotEqual(self.vm['PV_ramdisk'], '')
+
+        # check that these are not set
+        self.assertEqual(self.vm['HVM_boot_params'], {})
+        self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+    def _list_vdis(self):
+        # All VDI refs known to the fake XenAPI server.
+        session = get_session()
+        return session.call_xenapi('VDI.get_all')
+
+    def _list_vms(self):
+        # All VM refs known to the fake XenAPI server.
+        session = get_session()
+        return session.call_xenapi('VM.get_all')
+
+    def _check_vdis(self, start_list, end_list):
+        # Fail if any VDI appeared between the two snapshots, unless it is
+        # a cached base image (identified by an image-id in other_config).
+        for vdi_ref in end_list:
+            if vdi_ref not in start_list:
+                vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+                # If the cache is turned on then the base disk will be
+                # there even after the cleanup
+                if 'other_config' in vdi_rec:
+                    if 'image-id' not in vdi_rec['other_config']:
+                        self.fail('Found unexpected VDI:%s' % vdi_ref)
+                else:
+                    self.fail('Found unexpected VDI:%s' % vdi_ref)
+
+    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
+                    instance_type_id="3", os_type="linux",
+                    hostname="test", architecture="x86-64", instance_id=1,
+                    injected_files=None, check_injection=False,
+                    create_record=True, empty_dns=False,
+                    block_device_info=None,
+                    key_data=None):
+        # Shared driver for all spawn tests: build (or load) an instance,
+        # spawn it through the driver, then record and sanity-check the
+        # resulting VM via create_vm_record/check_vm_record.
+        if injected_files is None:
+            injected_files = []
+
+        # Fake out inject_instance_metadata
+        def fake_inject_instance_metadata(self, instance, vm):
+            pass
+        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+                       fake_inject_instance_metadata)
+
+        if create_record:
+            instance = objects.Instance(context=self.context)
+            instance.project_id = self.project_id
+            instance.user_id = self.user_id
+            instance.image_ref = image_ref
+            instance.kernel_id = kernel_id
+            instance.ramdisk_id = ramdisk_id
+            instance.root_gb = 20
+            instance.ephemeral_gb = 0
+            instance.instance_type_id = instance_type_id
+            instance.os_type = os_type
+            instance.hostname = hostname
+            instance.key_data = key_data
+            instance.architecture = architecture
+            instance.system_metadata = get_create_system_metadata(
+                self.context, instance_type_id)
+            instance.create()
+        else:
+            instance = objects.Instance.get_by_id(self.context, instance_id)
+
+        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+        if empty_dns:
+            # NOTE(tr3buchet): this is a terrible way to do this...
+            network_info[0]['network']['subnets'][0]['dns'] = []
+
+        image_meta = {}
+        if image_ref:
+            image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
+        self.conn.spawn(self.context, instance, image_meta, injected_files,
+                        'herp', network_info, block_device_info)
+        self.create_vm_record(self.conn, os_type, instance['name'])
+        self.check_vm_record(self.conn, instance_type_id, check_injection)
+        self.assertEqual(instance['os_type'], os_type)
+        self.assertEqual(instance['architecture'], architecture)
+
+    def test_spawn_ipxe_iso_success(self):
+        # With network name, boot menu URL and mkisofs configured, spawn
+        # of an ipxe_boot image calls the 'ipxe'/'inject' dom0 plugin.
+        self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
+        vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
+
+        self.flags(ipxe_network_name='test1',
+                   ipxe_boot_menu_url='http://boot.example.com',
+                   ipxe_mkisofs_cmd='/root/mkisofs',
+                   group='xenserver')
+        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+        self.conn._session.call_plugin_serialized(
+            'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
+            'http://boot.example.com', '192.168.1.100', '255.255.255.0',
+            '192.168.1.1', '192.168.1.3', '/root/mkisofs')
+
+        self.mox.ReplayAll()
+        self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+    def test_spawn_ipxe_iso_no_network_name(self):
+        # Missing ipxe_network_name disables the plugin call.
+        self.flags(ipxe_network_name=None,
+                   ipxe_boot_menu_url='http://boot.example.com',
+                   group='xenserver')
+
+        # call_plugin_serialized shouldn't be called
+        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+        self.mox.ReplayAll()
+        self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+    def test_spawn_ipxe_iso_no_boot_menu_url(self):
+        # Missing ipxe_boot_menu_url disables the plugin call.
+        self.flags(ipxe_network_name='test1',
+                   ipxe_boot_menu_url=None,
+                   group='xenserver')
+
+        # call_plugin_serialized shouldn't be called
+        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+        self.mox.ReplayAll()
+        self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+    def test_spawn_ipxe_iso_unknown_network_name(self):
+        # A network name that matches no instance network disables the call.
+        self.flags(ipxe_network_name='test2',
+                   ipxe_boot_menu_url='http://boot.example.com',
+                   group='xenserver')
+
+        # call_plugin_serialized shouldn't be called
+        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+        self.mox.ReplayAll()
+        self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+    def test_spawn_empty_dns(self):
+        # Test spawning with an empty dns list.
+        self._test_spawn(IMAGE_VHD, None, None,
+                         os_type="linux", architecture="x86-64",
+                         empty_dns=True)
+        self.check_vm_params_for_linux()
+
+    def test_spawn_not_enough_memory(self):
+        # Flavor 4 (m1.xlarge) exceeds the fake host's free memory.
+        self.assertRaises(exception.InsufficientFreeMemory,
+                          self._test_spawn,
+                          '1', 2, 3, "4")  # m1.xlarge
+
+    def test_spawn_fail_cleanup_1(self):
+        """Simulates an error while downloading an image.
+
+        Verifies that the VM and VDIs created are properly cleaned up.
+        """
+        vdi_recs_start = self._list_vdis()
+        start_vms = self._list_vms()
+        stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
+        self.assertRaises(xenapi_fake.Failure,
+                          self._test_spawn, '1', 2, 3)
+        # No additional VDI should be found.
+        vdi_recs_end = self._list_vdis()
+        end_vms = self._list_vms()
+        self._check_vdis(vdi_recs_start, vdi_recs_end)
+        # No additional VMs should be found.
+        self.assertEqual(start_vms, end_vms)
+
+    def test_spawn_fail_cleanup_2(self):
+        """Simulates an error while creating VM record.
+
+        Verifies that the VM and VDIs created are properly cleaned up.
+        """
+        vdi_recs_start = self._list_vdis()
+        start_vms = self._list_vms()
+        stubs.stubout_create_vm(self.stubs)
+        self.assertRaises(xenapi_fake.Failure,
+                          self._test_spawn, '1', 2, 3)
+        # No additional VDI should be found.
+        vdi_recs_end = self._list_vdis()
+        end_vms = self._list_vms()
+        self._check_vdis(vdi_recs_start, vdi_recs_end)
+        # No additional VMs should be found.
+        self.assertEqual(start_vms, end_vms)
+
+    def test_spawn_fail_cleanup_3(self):
+        """Simulates an error while attaching disks.
+
+        Verifies that the VM and VDIs created are properly cleaned up.
+        """
+        stubs.stubout_attach_disks(self.stubs)
+        vdi_recs_start = self._list_vdis()
+        start_vms = self._list_vms()
+        self.assertRaises(xenapi_fake.Failure,
+                          self._test_spawn, '1', 2, 3)
+        # No additional VDI should be found.
+        vdi_recs_end = self._list_vdis()
+        end_vms = self._list_vms()
+        self._check_vdis(vdi_recs_start, vdi_recs_end)
+        # No additional VMs should be found.
+        self.assertEqual(start_vms, end_vms)
+
+    def test_spawn_raw_glance(self):
+        # Raw image with no os_type is treated as an HVM (windows-style) VM.
+        self._test_spawn(IMAGE_RAW, None, None, os_type=None)
+        self.check_vm_params_for_windows()
+
+    def test_spawn_vhd_glance_linux(self):
+        self._test_spawn(IMAGE_VHD, None, None,
+                         os_type="linux", architecture="x86-64")
+        self.check_vm_params_for_linux()
+
+    def test_spawn_vhd_glance_windows(self):
+        self._test_spawn(IMAGE_VHD, None, None,
+                         os_type="windows", architecture="i386",
+                         instance_type_id=5)
+        self.check_vm_params_for_windows()
+
+    def test_spawn_iso_glance(self):
+        self._test_spawn(IMAGE_ISO, None, None,
+                         os_type="windows", architecture="i386")
+        self.check_vm_params_for_windows()
+
+    def test_spawn_glance(self):
+        # ami image with separate kernel/ramdisk images -> PV guest with
+        # an external kernel; disk fetch is faked to create a blank VDI.
+        def fake_fetch_disk_image(context, session, instance, name_label,
+                                  image_id, image_type):
+            sr_ref = vm_utils.safe_find_sr(session)
+            image_type_str = vm_utils.ImageType.to_string(image_type)
+            vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
+                                          name_label, image_type_str, "20")
+            vdi_role = vm_utils.ImageType.get_role(image_type)
+            vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
+            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
+        self.stubs.Set(vm_utils, '_fetch_disk_image',
+                       fake_fetch_disk_image)
+
+        self._test_spawn(IMAGE_MACHINE,
+                         IMAGE_KERNEL,
+                         IMAGE_RAMDISK)
+        self.check_vm_params_for_linux_with_external_kernel()
+
+    def test_spawn_boot_from_volume_no_image_meta(self):
+        dev_info = get_fake_device_info()
+        self._test_spawn(None, None, None,
+                         block_device_info=dev_info)
+
+    def test_spawn_boot_from_volume_no_glance_image_meta(self):
+        dev_info = get_fake_device_info()
+        self._test_spawn(IMAGE_FROM_VOLUME, None, None,
+                         block_device_info=dev_info)
+
+    def test_spawn_boot_from_volume_with_image_meta(self):
+        dev_info = get_fake_device_info()
+        self._test_spawn(IMAGE_VHD, None, None,
+                         block_device_info=dev_info)
+
+    def test_spawn_netinject_file(self):
+        # With flat_injected, spawn writes /etc/network/interfaces into the
+        # guest image; capture the tee'd file content and compare it.
+        self.flags(flat_injected=True)
+        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+        self._tee_executed = False
+
+        def _tee_handler(cmd, **kwargs):
+            actual = kwargs.get('process_input', None)
+            expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 192.168.1.100
+    netmask 255.255.255.0
+    broadcast 192.168.1.255
+    gateway 192.168.1.1
+    dns-nameservers 192.168.1.3 192.168.1.4
+iface eth0 inet6 static
+    address 2001:db8:0:1:dcad:beff:feef:1
+    netmask 64
+    gateway 2001:db8:0:1::1
+"""
+            self.assertEqual(expected, actual)
+            self._tee_executed = True
+            return '', ''
+
+        def _readlink_handler(cmd_parts, **kwargs):
+            return os.path.realpath(cmd_parts[2]), ''
+
+        fake_processutils.fake_execute_set_repliers([
+            # Capture the tee .../etc/network/interfaces command
+            (r'tee.*interfaces', _tee_handler),
+            (r'readlink -nm.*', _readlink_handler),
+        ])
+        self._test_spawn(IMAGE_MACHINE,
+                         IMAGE_KERNEL,
+                         IMAGE_RAMDISK,
+                         check_injection=True)
+        self.assertTrue(self._tee_executed)
+
    def test_spawn_netinject_xenstore(self):
        """File injection is skipped when a capable guest agent exists.

        The fake mount handler materialises an ``xe-update-networking``
        binary inside the simulated guest filesystem, which signals an
        injection-capable agent, so the tee-based injection must not run.
        """
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)

        self._tee_executed = False

        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to
            # simulate files in the mounted filesystem

            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug('Creating files in %s to simulate guest agent',
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                              'xe-update-networking'), 'w').close()
            return '', ''

        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug('Removing simulated guest agent files in %s',
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                                   'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''

        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''

        fake_processutils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn('1', 2, 3, check_injection=True)

        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)
+
+ def test_spawn_injects_auto_disk_config_to_xenstore(self):
+ instance = self._create_instance(spawn=False)
+ self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
+ self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.spawn(self.context, instance,
+ IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
+
    def test_spawn_vlanmanager(self):
        """Spawn succeeds when networking is handled by VlanManager.

        Instance id 2 is mapped to the vlan network by db/fakes.py, so
        the network table is reset, networking is allocated for that
        instance, and the instance is then spawned.
        """
        self.flags(network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')

        def dummy(*args, **kwargs):
            # VIF creation is irrelevant for this test; skip it
            pass

        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance id = 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        self.network.conductor_api = conductor_api.LocalAPI()
        self._create_instance(2, False)
        networks = self.network.db.network_get_all(ctxt)
        with mock.patch('nova.objects.network.Network._from_db_object'):
            for network in networks:
                self.network.set_network_host(ctxt, network)

        self.network.allocate_for_instance(
            ctxt,
            instance_id=2,
            instance_uuid='00000000-0000-0000-0000-000000000002',
            host=CONF.host,
            vpn=None,
            rxtx_factor=3,
            project_id=self.project_id,
            macs=None)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         instance_id=2,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db
+
+ def test_spawn_with_network_qos(self):
+ self._create_instance()
+ for vif_ref in xenapi_fake.get_all('VIF'):
+ vif_rec = xenapi_fake.get_record('VIF', vif_ref)
+ self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
+ self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
+ str(3 * 10 * 1024))
+
    def test_spawn_ssh_key_injection(self):
        """An RSA key_data is injected into authorized_keys via the agent."""
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []

        def fake_inject_file(self, method, args):
            # record exactly what the agent plugin was asked to write
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)

        def fake_encrypt_text(sshkey, new_pass):
            # the RSA key is also used to encrypt the admin password
            self.assertEqual("ssh-rsa fake_keydata", sshkey)
            return "fake"

        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)

        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-rsa fake_keydata\n')

        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-rsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
+
    def test_spawn_ssh_key_injection_non_rsa(self):
        """A non-RSA key is injected but never used to encrypt a password."""
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []

        def fake_inject_file(self, method, args):
            # record exactly what the agent plugin was asked to write
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)

        def fake_encrypt_text(sshkey, new_pass):
            # only RSA keys may be used for password encryption
            raise NotImplementedError("Should not be called")

        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)

        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-dsa fake_keydata\n')

        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-dsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
+
+ def test_spawn_injected_files(self):
+ # Test spawning with injected_files.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ injected_files = [('/tmp/foo', 'foobar')]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ injected_files=injected_files)
+ self.check_vm_params_for_linux()
+ self.assertEqual(actual_injected_files, injected_files)
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_spawn_agent_upgrade(self, mock_get):
+ self.flags(use_agent_default=True,
+ group='xenserver')
+
+ mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf",
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False,
+ 'id': 1}
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_spawn_agent_upgrade_fails_silently(self, mock_get):
+ mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf",
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False,
+ 'id': 1}
+
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ method="_plugin_agent_agentupdate", failure="fake_error")
+
    def test_spawn_with_resetnetwork_alternative_returncode(self):
        """A '500' returncode from agent resetnetwork is treated as success."""
        self.flags(use_agent_default=True,
                   group='xenserver')

        def fake_resetnetwork(self, method, args):
            # flag on the function itself so the outer test can check it
            fake_resetnetwork.called = True
            # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
            return jsonutils.dumps({'returncode': '500',
                                    'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_resetnetwork', fake_resetnetwork)
        fake_resetnetwork.called = False

        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.assertTrue(fake_resetnetwork.called)
+
    def _test_spawn_fails_silently_with(self, expected_exception_cls,
                                        method="_plugin_agent_version",
                                        failure=None, value=None):
        """Spawn despite an agent error and verify the recorded fault.

        :param expected_exception_cls: exception type expected to be
            recorded via add_instance_fault_from_exc
        :param method: agent plugin method on the fake session to stub
        :param failure: if set, the stub raises a xenapi Failure with
            this error string
        :param value: otherwise the stub returns this value verbatim
        """
        self.flags(use_agent_default=True,
                   agent_version_timeout=0,
                   group='xenserver')

        def fake_agent_call(self, method, args):
            if failure:
                raise xenapi_fake.Failure([failure])
            else:
                return value

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       method, fake_agent_call)

        called = {}

        def fake_add_instance_fault(*args, **kwargs):
            # args[2] is the exception instance being recorded
            called["fake_add_instance_fault"] = args[2]

        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       fake_add_instance_fault)

        # the spawn itself must succeed even though the agent failed
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        actual_exception = called["fake_add_instance_fault"]
        self.assertIsInstance(actual_exception, expected_exception_cls)
+
+ def test_spawn_fails_silently_with_agent_timeout(self):
+ self._test_spawn_fails_silently_with(exception.AgentTimeout,
+ failure="TIMEOUT:fake")
+
+ def test_spawn_fails_silently_with_agent_not_implemented(self):
+ self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
+ failure="NOT IMPLEMENTED:fake")
+
+ def test_spawn_fails_silently_with_agent_error(self):
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ failure="fake_error")
+
+ def test_spawn_fails_silently_with_agent_bad_return(self):
+ error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ value=error)
+
    def test_rescue(self):
        """Rescue rebuilds the disk layout on the -rescue VM.

        The original root disk is attached to the rescue VM at
        userdevice 1, the other local disks keep their device numbers,
        and volume-backed VBDs (marked osvol) are not copied across.
        """
        instance = self._create_instance(spawn=False)
        xenapi_fake.create_vm(instance['name'], 'Running')

        session = get_session()
        vm_ref = vm_utils.lookup(session, instance['name'])

        swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
        root_vdi_ref = xenapi_fake.create_vdi('root', None)
        eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
        eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
        vol_vdi_ref = xenapi_fake.create_vdi('volume', None)

        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
        xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
        xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
        xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
        # volume VBDs carry osvol in other_config
        xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
                               other_config={'osvol': True})

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        conn.rescue(self.context, instance, [], image_meta, '')

        vm = xenapi_fake.get_record('VM', vm_ref)
        rescue_name = "%s-rescue" % vm["name_label"]
        rescue_ref = vm_utils.lookup(session, rescue_name)
        rescue_vm = xenapi_fake.get_record('VM', rescue_ref)

        # map VDI -> userdevice for the rescue VM's VBDs
        vdi_refs = {}
        for vbd_ref in rescue_vm['VBDs']:
            vbd = xenapi_fake.get_record('VBD', vbd_ref)
            vdi_refs[vbd['VDI']] = vbd['userdevice']

        # the original root disk moved from device 0 to device 1
        self.assertEqual('1', vdi_refs[root_vdi_ref])
        self.assertEqual('2', vdi_refs[swap_vdi_ref])
        self.assertEqual('4', vdi_refs[eph1_vdi_ref])
        self.assertEqual('5', vdi_refs[eph2_vdi_ref])
        self.assertNotIn(vol_vdi_ref, vdi_refs)
+
    def test_rescue_preserve_disk_on_failure(self):
        """The original disk survives a failed rescue setup (bug #1227898)."""
        instance = self._create_instance()
        session = get_session()
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}

        vm_ref = vm_utils.lookup(session, instance['name'])
        vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)

        # raise an error in the spawn setup process and trigger the
        # undo manager logic:
        def fake_start(*args, **kwargs):
            raise test.TestingException('Start Error')

        self.stubs.Set(self.conn._vmops, '_start', fake_start)

        self.assertRaises(test.TestingException, self.conn.rescue,
                          self.context, instance, [], image_meta, '')

        # confirm original disk still exists:
        vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
        self.assertEqual(vdi_ref, vdi_ref2)
        self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
+
+ def test_unrescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ # Unrescue expects the original instance to be powered off
+ conn.power_off(instance)
+ xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
+ conn.unrescue(instance, None)
+
+ def test_unrescue_not_in_rescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ # Ensure that it will not unrescue a non-rescued instance.
+ self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
+ instance, None)
+
+ def test_finish_revert_migration(self):
+ instance = self._create_instance()
+
+ class VMOpsMock():
+
+ def __init__(self):
+ self.finish_revert_migration_called = False
+
+ def finish_revert_migration(self, context, instance, block_info,
+ power_on):
+ self.finish_revert_migration_called = True
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn._vmops = VMOpsMock()
+ conn.finish_revert_migration(self.context, instance, None)
+ self.assertTrue(conn._vmops.finish_revert_migration_called)
+
+ def test_reboot_hard(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.reboot(self.context, instance, None, "HARD")
+
+ def test_poll_rebooting_instances(self):
+ self.mox.StubOutWithMock(compute_api.API, 'reboot')
+ compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ instance = self._create_instance()
+ instances = [instance]
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.poll_rebooting_instances(60, instances)
+
+ def test_reboot_soft(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_reboot_halted(self):
+ session = get_session()
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Halted')
+ conn.reboot(self.context, instance, None, "SOFT")
+ vm_ref = vm_utils.lookup(session, instance['name'])
+ vm = xenapi_fake.get_record('VM', vm_ref)
+ self.assertEqual(vm['power_state'], 'Running')
+
+ def test_reboot_unknown_state(self):
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Unknown')
+ self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
+ instance, None, "SOFT")
+
    def test_reboot_rescued(self):
        """Rebooting a RESCUED instance looks up the VM with check_rescue."""
        instance = self._create_instance()
        instance['vm_state'] = vm_states.RESCUED
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        real_result = vm_utils.lookup(conn._session, instance['name'])

        # for a rescued instance, lookup must be called with
        # check_rescue=True (the trailing positional argument)
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(conn._session, instance['name'],
                        True).AndReturn(real_result)
        self.mox.ReplayAll()

        conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_get_console_output_succeeds(self):
+
+ def fake_get_console_output(instance):
+ self.assertEqual("instance", instance)
+ return "console_log"
+ self.stubs.Set(self.conn._vmops, 'get_console_output',
+ fake_get_console_output)
+
+ self.assertEqual(self.conn.get_console_output('context', "instance"),
+ "console_log")
+
    def _test_maintenance_mode(self, find_host, find_aggregate):
        """Drive host_maintenance_mode and verify the instance migration.

        :param find_host: whether _host_find locates a destination
            host; when False a NoValidHost error propagates
        :param find_aggregate: whether the host belongs to an
            aggregate; when False a NotFound error propagates
        """
        real_call_xenapi = self.conn._session.call_xenapi
        instance = self._create_instance(spawn=True)
        api_calls = {}

        # Record all the xenapi calls, and return a fake list of hosts
        # for the host.get_all call
        def fake_call_xenapi(method, *args):
            api_calls[method] = args
            if method == 'host.get_all':
                return ['foo', 'bar', 'baz']
            return real_call_xenapi(method, *args)
        self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)

        def fake_aggregate_get(context, host, key):
            if find_aggregate:
                return [test_aggregate.fake_aggregate]
            else:
                return []
        self.stubs.Set(db, 'aggregate_get_by_host',
                       fake_aggregate_get)

        def fake_host_find(context, session, src, dst):
            if find_host:
                return 'bar'
            else:
                raise exception.NoValidHost("I saw this one coming...")
        self.stubs.Set(host, '_host_find', fake_host_find)

        result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
        self.assertEqual(result, 'on_maintenance')

        # We expect the VM.pool_migrate call to have been called to
        # migrate our instance to the 'bar' host
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        host_ref = "foo"
        expected = (vm_ref, host_ref, {"live": "true"})
        self.assertEqual(api_calls.get('VM.pool_migrate'), expected)

        # the instance must now be flagged as migrating
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
        self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
+ def test_maintenance_mode(self):
+ self._test_maintenance_mode(True, True)
+
+ def test_maintenance_mode_no_host(self):
+ self.assertRaises(exception.NoValidHost,
+ self._test_maintenance_mode, False, True)
+
+ def test_maintenance_mode_no_aggregate(self):
+ self.assertRaises(exception.NotFound,
+ self._test_maintenance_mode, True, False)
+
    def test_uuid_find(self):
        """_uuid_find maps an instance name back to its uuid."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        fake_inst = fake_instance.fake_db_instance(id=123)
        fake_inst2 = fake_instance.fake_db_instance(id=456)
        db.instance_get_all_by_host(self.context, fake_inst['host'],
                                    columns_to_join=None,
                                    use_slave=False
                                    ).AndReturn([fake_inst, fake_inst2])
        self.mox.ReplayAll()
        # instance names are derived from the instance_name_template
        expected_name = CONF.instance_name_template % fake_inst['id']
        inst_uuid = host._uuid_find(self.context, fake_inst['host'],
                                    expected_name)
        self.assertEqual(inst_uuid, fake_inst['uuid'])
+
+ def test_session_virtapi(self):
+ was = {'called': False}
+
+ def fake_aggregate_get_by_host(self, *args, **kwargs):
+ was['called'] = True
+ raise test.TestingException()
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+
+ self.stubs.Set(self.conn._session, "is_slave", True)
+
+ self.assertRaises(test.TestingException,
+ self.conn._session._get_host_uuid)
+ self.assertTrue(was['called'])
+
+ def test_per_instance_usage_running(self):
+ instance = self._create_instance(spawn=True)
+ flavor = flavors.get_flavor(3)
+
+ expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
+ 'uuid': instance['uuid']}}
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ # Paused instances still consume resources:
+ self.conn.pause(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ def test_per_instance_usage_suspended(self):
+ # Suspended instances do not consume memory:
+ instance = self._create_instance(spawn=True)
+ self.conn.suspend(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def test_per_instance_usage_halted(self):
+ instance = self._create_instance(spawn=True)
+ self.conn.power_off(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
    def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
        """Create (and by default spawn) a test instance.

        :param instance_id: integer id; also seeds the uuid and name
        :param spawn: when True the instance is spawned on self.conn
        :param obj: when True return an objects.Instance rather than
            the raw DB record
        :param attrs: overrides merged into the default values
        :returns: the instance DB record, or object when obj is True
        """
        instance_values = {
            'id': instance_id,
            'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
            'display_name': 'host-%d' % instance_id,
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'vm_mode': 'hvm',
            'architecture': 'x86-64'}
        instance_values.update(attrs)

        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        if spawn:
            self.conn.spawn(self.context, instance, image_meta, [], 'herp',
                            network_info)
        if obj:
            instance = objects.Instance._from_db_object(
                self.context, objects.Instance(), instance,
                expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
        return instance
+
    def test_destroy_clean_up_kernel_and_ramdisk(self):
        """Destroy tears down the VM's kernel and ramdisk files."""
        def fake_lookup_kernel_ramdisk(session, vm_ref):
            return "kernel", "ramdisk"

        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)

        def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
            # flag on the function itself so the outer test can check it
            fake_destroy_kernel_ramdisk.called = True
            self.assertEqual("kernel", kernel)
            self.assertEqual("ramdisk", ramdisk)

        fake_destroy_kernel_ramdisk.called = False

        self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
                       fake_destroy_kernel_ramdisk)

        instance = self._create_instance(spawn=True)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        self.conn.destroy(self.context, instance, network_info)

        # the VM is gone and the kernel/ramdisk cleanup ran
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        self.assertIsNone(vm_ref)
        self.assertTrue(fake_destroy_kernel_ramdisk.called)
+
+
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
    """Unit tests for Diffie-Hellman code."""

    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        self.alice = agent.SimpleDH()
        self.bob = agent.SimpleDH()

    def test_shared(self):
        # Both parties must derive the same shared secret from the
        # other's public key.
        shared_by_alice = self.alice.compute_shared(self.bob.get_public())
        shared_by_bob = self.bob.compute_shared(self.alice.get_public())
        self.assertEqual(shared_by_alice, shared_by_bob)

    def _test_encryption(self, message):
        """Round-trip *message* through alice's encrypt and bob's decrypt."""
        ciphertext = self.alice.encrypt(message)
        self.assertFalse(ciphertext.endswith('\n'))
        self.assertEqual(message, self.bob.decrypt(ciphertext))

    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')

    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')

    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')

    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')

    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')

    def test_encrypt_really_long_message(self):
        self._test_encryption('abcd' * 1024)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIMigrateInstance(stubs.XenAPITestBase):
+ """Unit test for verifying migration-related actions."""
+
+ REQUIRES_LOCKING = True
+
    def setUp(self):
        """Wire up the fake XenAPI session, fake DB and migration fixtures."""
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', 'fake_br1')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        # default instance: m1.large, no kernel/ramdisk
        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': None,
                                'ramdisk_id': None,
                                'root_gb': 80,
                                'ephemeral_gb': 0,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)

        fake_processutils.stub_out_processutils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)

        def fake_inject_instance_metadata(self, instance, vm):
            # metadata injection is irrelevant for migration tests
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)
+
+ def test_migrate_disk_and_power_off(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = {"root_gb": 80, 'ephemeral_gb': 0}
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.migrate_disk_and_power_off(self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_passes_exceptions(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = {"root_gb": 80, 'ephemeral_gb': 0}
+
+ def fake_raise(*args, **kwargs):
+ raise exception.MigrationError(reason='test failure')
+ self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.MigrationError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ flavor = {"root_gb": 0, 'ephemeral_gb': 0}
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ResizeError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ 'fake_dest', flavor, None)
+
+ def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
+ flavor = {"root_gb": 0, 'ephemeral_gb': 0}
+ values = copy.copy(self.instance_values)
+ values["root_gb"] = 0
+ values["ephemeral_gb"] = 0
+ instance = db.instance_create(self.context, values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.migrate_disk_and_power_off(self.context, instance,
+ '127.0.0.1', flavor, None)
+
    def _test_revert_migrate(self, power_on):
        """Finish a migration and then revert it.

        :param power_on: whether finish_migration should start the VM;
            the fake _start records whether it actually ran.
        """
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False
        context = 'fake_context'

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        # base/cow VDIs simulate the migrated disk chain
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        # the VDI was resized and the VM was started only if requested
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)

        conn.finish_revert_migration(context, instance, network_info)
        self.assertEqual(self.fake_finish_revert_migration_called, True)
+
+ def test_revert_migrate_power_on(self):
+ self._test_revert_migrate(True)
+
+ def test_revert_migrate_power_off(self):
+ self._test_revert_migrate(False)
+
    def _test_finish_migrate(self, power_on):
        """Run finish_migration with a resize and verify VDI/start calls.

        :param power_on: whether finish_migration should start the VM;
            the fake _start records whether it actually ran.
        """
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        # the VDI was resized and the VM was started only if requested
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)
+
+ def test_finish_migrate_power_on(self):
+ self._test_finish_migrate(True)
+
+ def test_finish_migrate_power_off(self):
+ self._test_finish_migrate(False)
+
+ def test_finish_migrate_no_local_storage(self):
+ values = copy.copy(self.instance_values)
+ values["root_gb"] = 0
+ values["ephemeral_gb"] = 0
+ instance = create_instance_with_system_metadata(self.context, values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=True)
+
+ def test_finish_migrate_no_resize_vdi(self):
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ # Resize instance would be determined by the compute call
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_migrate_too_many_partitions_no_resize_down(self):
+ instance_values = self.instance_values
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = db.flavor_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_partitions(partition):
+ return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
+
+ self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
+
+ self.assertRaises(exception.InstanceFaultRollback,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_migrate_bad_fs_type_no_resize_down(self):
+ instance_values = self.instance_values
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = db.flavor_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_partitions(partition):
+ return [(1, 2, 3, "ext2", "", "boot")]
+
+ self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
+
+ self.assertRaises(exception.InstanceFaultRollback,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
    def test_migrate_rollback_when_resize_down_fs_fails(self):
        """A failed resize-down restores the original VM and cleans up.

        When migrate_vhd raises ResizeError, the newly created VDI must
        be destroyed, the original VM restored, and the error wrapped in
        InstanceFaultRollback.
        """
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops

        self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
        self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
        self.mox.StubOutWithMock(vm_utils, 'resize_disk')
        self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
        self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')

        instance = objects.Instance(context=self.context,
                                    auto_disk_config=True, uuid='uuid')
        instance.obj_reset_changes()
        vm_ref = "vm_ref"
        dest = "dest"
        flavor = "type"
        sr_path = "sr_path"

        # expected call sequence up to the failing migrate_vhd
        vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
        vmops._apply_orig_vm_name_label(instance, vm_ref)
        old_vdi_ref = "old_ref"
        vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
            (old_vdi_ref, None))
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"
        vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
                             flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
        vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
                             sr_path, 0).AndRaise(
                                 exception.ResizeError(reason="asdf"))

        # rollback: drop the new VDI and restore the original VM
        vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
        vmops._restore_orig_vm_and_cleanup_orphan(instance)

        self.mox.ReplayAll()

        with mock.patch.object(instance, 'save') as mock_save:
            self.assertRaises(exception.InstanceFaultRollback,
                              vmops._migrate_disk_resizing_down, self.context,
                              instance, dest, flavor, vm_ref, sr_path)
            self.assertEqual(3, mock_save.call_count)
            self.assertEqual(60.0, instance.progress)
+
+    def test_resize_ensure_vm_is_shutdown_cleanly(self):
+        """A running VM that accepts a clean shutdown needs no hard
+        shutdown (hard_shutdown_vm is stubbed but never expected).
+        """
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        vmops = conn._vmops
+        fake_instance = {'uuid': 'uuid'}
+
+        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+                                   "ref").AndReturn(True)
+
+        self.mox.ReplayAll()
+
+        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+    def test_resize_ensure_vm_is_shutdown_forced(self):
+        """If clean shutdown fails, a hard shutdown is attempted and its
+        success is enough to proceed.
+        """
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        vmops = conn._vmops
+        fake_instance = {'uuid': 'uuid'}
+
+        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+                                   "ref").AndReturn(False)
+        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
+                                  "ref").AndReturn(True)
+
+        self.mox.ReplayAll()
+
+        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+    def test_resize_ensure_vm_is_shutdown_fails(self):
+        """If both clean and hard shutdown fail, ResizeError is raised."""
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        vmops = conn._vmops
+        fake_instance = {'uuid': 'uuid'}
+
+        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+                                   "ref").AndReturn(False)
+        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
+                                  "ref").AndReturn(False)
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(exception.ResizeError,
+            vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
+
+    def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
+        """An already-halted VM requires no shutdown call at all
+        (neither clean_shutdown_vm nor hard_shutdown_vm is expected).
+        """
+        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        vmops = conn._vmops
+        fake_instance = {'uuid': 'uuid'}
+
+        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
+
+        self.mox.ReplayAll()
+
+        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+
+class XenAPIImageTypeTestCase(test.NoDBTestCase):
+    """Test ImageType class."""
+
+    def test_to_string(self):
+        # Can convert from type id to type string.
+        self.assertEqual(
+            vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
+            vm_utils.ImageType.KERNEL_STR)
+
+    def _assert_role(self, expected_role, image_type_id):
+        """Assert ImageType.get_role maps image_type_id to expected_role."""
+        self.assertEqual(
+            expected_role,
+            vm_utils.ImageType.get_role(image_type_id))
+
+    def test_get_image_role_kernel(self):
+        self._assert_role('kernel', vm_utils.ImageType.KERNEL)
+
+    def test_get_image_role_ramdisk(self):
+        self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
+
+    def test_get_image_role_disk(self):
+        self._assert_role('root', vm_utils.ImageType.DISK)
+
+    def test_get_image_role_disk_raw(self):
+        # All disk variants (plain, raw, vhd) play the 'root' role.
+        self._assert_role('root', vm_utils.ImageType.DISK_RAW)
+
+    def test_get_image_role_disk_vhd(self):
+        self._assert_role('root', vm_utils.ImageType.DISK_VHD)
+
+
+class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
+    """Unit tests for code that detects the ImageType."""
+    def assert_disk_type(self, image_meta, expected_disk_type):
+        """Assert determine_disk_image_type(image_meta) yields the type."""
+        actual = vm_utils.determine_disk_image_type(image_meta)
+        self.assertEqual(expected_disk_type, actual)
+
+    def test_machine(self):
+        # 'ami' disk_format maps to the plain DISK type.
+        image_meta = {'id': 'a', 'disk_format': 'ami'}
+        self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
+
+    def test_raw(self):
+        image_meta = {'id': 'a', 'disk_format': 'raw'}
+        self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
+
+    def test_vhd(self):
+        image_meta = {'id': 'a', 'disk_format': 'vhd'}
+        self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
+
+    def test_none(self):
+        # Missing image metadata yields no disk type at all.
+        image_meta = None
+        self.assert_disk_type(image_meta, None)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIHostTestCase(stubs.XenAPITestBase):
+    """Tests HostState, which holds metrics from XenServer that get
+    reported back to the Schedulers.
+    """
+
+    def setUp(self):
+        super(XenAPIHostTestCase, self).setUp()
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        self.context = context.get_admin_context()
+        self.flags(use_local=True, group='conductor')
+        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        self.instance = fake_instance.fake_db_instance(name='foo')
+
+    def test_host_state(self):
+        stats = self.conn.host_state.get_host_stats(False)
+        # Values from fake.create_local_srs (ext SR)
+        self.assertEqual(stats['disk_total'], 40000)
+        self.assertEqual(stats['disk_used'], 20000)
+        # Values from fake._plugin_xenhost_host_data
+        self.assertEqual(stats['host_memory_total'], 10)
+        self.assertEqual(stats['host_memory_overhead'], 20)
+        self.assertEqual(stats['host_memory_free'], 30)
+        self.assertEqual(stats['host_memory_free_computed'], 40)
+        self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
+        self.assertThat({'cpu_count': 50},
+                        matchers.DictMatches(stats['host_cpu_info']))
+        # No VMs running
+        self.assertEqual(stats['vcpus_used'], 0)
+
+    def test_host_state_vcpus_used(self):
+        # vcpus_used grows once a (fake) VM is running on the host.
+        stats = self.conn.host_state.get_host_stats(True)
+        self.assertEqual(stats['vcpus_used'], 0)
+        xenapi_fake.create_vm(self.instance['name'], 'Running')
+        stats = self.conn.host_state.get_host_stats(True)
+        self.assertEqual(stats['vcpus_used'], 4)
+
+    def test_pci_passthrough_devices_whitelist(self):
+        # NOTE(guillaume-thouvenin): This pci whitelist will be used to
+        # match with _plugin_xenhost_get_pci_device_details method in fake.py.
+        white_list = '{"vendor_id":"10de", "product_id":"11bf"}'
+        self.flags(pci_passthrough_whitelist=[white_list])
+        stats = self.conn.host_state.get_host_stats(False)
+        self.assertEqual(len(stats['pci_passthrough_devices']), 1)
+
+    def test_pci_passthrough_devices_no_whitelist(self):
+        # Without a whitelist no PCI devices are exposed.
+        stats = self.conn.host_state.get_host_stats(False)
+        self.assertEqual(len(stats['pci_passthrough_devices']), 0)
+
+    def test_host_state_missing_sr(self):
+        # Must trigger construction of 'host_state' property
+        # before introducing the stub which raises the error
+        hs = self.conn.host_state
+
+        def fake_safe_find_sr(session):
+            raise exception.StorageRepositoryNotFound('not there')
+
+        self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
+        self.assertRaises(exception.StorageRepositoryNotFound,
+                          hs.get_host_stats,
+                          refresh=True)
+
+    def _test_host_action(self, method, action, expected=None):
+        """Invoke a host-level driver call and check its echoed result."""
+        result = method('host', action)
+        if not expected:
+            expected = action
+        self.assertEqual(result, expected)
+
+    def test_host_reboot(self):
+        self._test_host_action(self.conn.host_power_action, 'reboot')
+
+    def test_host_shutdown(self):
+        self._test_host_action(self.conn.host_power_action, 'shutdown')
+
+    def test_host_startup(self):
+        # XenServer cannot power-on a host via the driver.
+        self.assertRaises(NotImplementedError,
+                          self.conn.host_power_action, 'host', 'startup')
+
+    def test_host_maintenance_on(self):
+        self._test_host_action(self.conn.host_maintenance_mode,
+                               True, 'on_maintenance')
+
+    def test_host_maintenance_off(self):
+        self._test_host_action(self.conn.host_maintenance_mode,
+                               False, 'off_maintenance')
+
+    def test_set_enable_host_enable(self):
+        _create_service_entries(self.context, values={'nova': ['fake-mini']})
+        self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
+        service = db.service_get_by_args(self.context, 'fake-mini',
+                                         'nova-compute')
+        self.assertEqual(service.disabled, False)
+
+    def test_set_enable_host_disable(self):
+        _create_service_entries(self.context, values={'nova': ['fake-mini']})
+        self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
+        service = db.service_get_by_args(self.context, 'fake-mini',
+                                         'nova-compute')
+        self.assertEqual(service.disabled, True)
+
+    def test_get_host_uptime(self):
+        result = self.conn.get_host_uptime('host')
+        self.assertEqual(result, 'fake uptime')
+
+    def test_supported_instances_is_included_in_host_state(self):
+        stats = self.conn.host_state.get_host_stats(False)
+        self.assertIn('supported_instances', stats)
+
+    def test_supported_instances_is_calculated_by_to_supported_instances(self):
+
+        def to_supported_instances(somedata):
+            self.assertIsNone(somedata)
+            return "SOMERETURNVALUE"
+        self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
+
+        stats = self.conn.host_state.get_host_stats(False)
+        self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
+
+    def test_update_stats_caches_hostname(self):
+        """hypervisor_hostname is cached: a later change in the reported
+        host_hostname must not leak into refreshed stats.
+        """
+        self.mox.StubOutWithMock(host, 'call_xenhost')
+        self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
+        self.mox.StubOutWithMock(vm_utils, 'list_vms')
+        self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
+        data = {'disk_total': 0,
+                'disk_used': 0,
+                'disk_available': 0,
+                'supported_instances': 0,
+                'host_capabilities': [],
+                'host_hostname': 'foo',
+                'vcpus_used': 0,
+                }
+        sr_rec = {
+            'physical_size': 0,
+            'physical_utilisation': 0,
+            'virtual_allocation': 0,
+            }
+
+        for i in range(3):
+            host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
+            vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
+            vm_utils.list_vms(self.conn._session).AndReturn([])
+            self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
+                sr_rec)
+            if i == 2:
+                # On the third call (the second below) change the hostname
+                # NOTE(review): data is rebound only after the third
+                # expectation is already recorded, yet just two refreshes
+                # happen below -- verify the intended iteration index.
+                data = dict(data, host_hostname='bar')
+
+        self.mox.ReplayAll()
+        stats = self.conn.host_state.get_host_stats(refresh=True)
+        self.assertEqual('foo', stats['hypervisor_hostname'])
+        stats = self.conn.host_state.get_host_stats(refresh=True)
+        self.assertEqual('foo', stats['hypervisor_hostname'])
+
+
+class ToSupportedInstancesTestCase(test.NoDBTestCase):
+    """Tests host.to_supported_instances, which parses XenServer
+    capability strings like 'xen-3.0-x86_64' into (arch, hvtype, mode)
+    tuples.
+    """
+    def test_default_return_value(self):
+        self.assertEqual([],
+            host.to_supported_instances(None))
+
+    def test_return_value(self):
+        self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
+             host.to_supported_instances([u'xen-3.0-x86_64']))
+
+    def test_invalid_values_do_not_break(self):
+        # Unparseable entries ('spam') are skipped, not fatal.
+        self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
+             host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
+
+    def test_multiple_values(self):
+        self.assertEqual(
+            [
+                (arch.X86_64, hvtype.XEN, 'xen'),
+                (arch.I686, hvtype.XEN, 'hvm')
+            ],
+            host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
+        )
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
+    """Tests that root-disk auto-partitioning only happens when the
+    instance requests auto_disk_config AND all fail-safe checks on the
+    existing partition table pass.
+    """
+    def setUp(self):
+        super(XenAPIAutoDiskConfigTestCase, self).setUp()
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+                   'Dom0IptablesFirewallDriver')
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+
+        self.instance_values = {'id': 1,
+                  'project_id': self.project_id,
+                  'user_id': self.user_id,
+                  'image_ref': 1,
+                  'kernel_id': 2,
+                  'ramdisk_id': 3,
+                  'root_gb': 80,
+                  'ephemeral_gb': 0,
+                  'instance_type_id': '3',  # m1.large
+                  'os_type': 'linux',
+                  'architecture': 'x86-64'}
+
+        self.context = context.RequestContext(self.user_id, self.project_id)
+
+        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
+                            vbd_type='disk', read_only=False, bootable=True,
+                            osvol=False):
+            pass
+
+        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
+
+    def assertIsPartitionCalled(self, called):
+        """Attach disks to a fresh VM and assert whether the resize/
+        repartition helper (_resize_part_and_fs) was invoked.
+        """
+        marker = {"partition_called": False}
+
+        def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
+                                    flags):
+            marker["partition_called"] = True
+        self.stubs.Set(vm_utils, "_resize_part_and_fs",
+                       fake_resize_part_and_fs)
+
+        context.RequestContext(self.user_id, self.project_id)
+        session = get_session()
+
+        disk_image_type = vm_utils.ImageType.DISK_VHD
+        instance = create_instance_with_system_metadata(self.context,
+                                                        self.instance_values)
+        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
+                                       vdis, disk_image_type, "fake_nw_inf")
+
+        self.assertEqual(marker["partition_called"], called)
+
+    def test_instance_not_auto_disk_config(self):
+        """Should not partition unless instance is marked as
+        auto_disk_config.
+        """
+        self.instance_values['auto_disk_config'] = False
+        self.assertIsPartitionCalled(False)
+
+    @stub_vm_utils_with_vdi_attached_here
+    def test_instance_auto_disk_config_fails_safe_two_partitions(self):
+        # Should not partition unless fail safes pass.
+        self.instance_values['auto_disk_config'] = True
+
+        def fake_get_partitions(dev):
+            # Two 6-tuple partition records: (num, start, end, fstype,
+            # partition_type, flags).  More than one partition must make
+            # the fail-safe refuse to repartition.
+            # (Fixed: the second tuple was missing a comma after 'ext4',
+            # so implicit string concatenation produced a 5-tuple.)
+            return [(1, 0, 100, 'ext4', "", ""),
+                    (2, 100, 200, 'ext4', "", "")]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)
+
+        self.assertIsPartitionCalled(False)
+
+    @stub_vm_utils_with_vdi_attached_here
+    def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
+        # Should not partition unless fail safes pass.
+        self.instance_values['auto_disk_config'] = True
+
+        def fake_get_partitions(dev):
+            # The single partition is numbered 2, not 1.
+            return [(2, 100, 200, 'ext4', "", "")]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)
+
+        self.assertIsPartitionCalled(False)
+
+    @stub_vm_utils_with_vdi_attached_here
+    def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
+        # Should not partition unless fail safes pass.
+        self.instance_values['auto_disk_config'] = True
+
+        def fake_get_partitions(dev):
+            # 'asdf' is not a resizable filesystem type.
+            return [(1, 100, 200, 'asdf', "", "")]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)
+
+        self.assertIsPartitionCalled(False)
+
+    @stub_vm_utils_with_vdi_attached_here
+    def test_instance_auto_disk_config_passes_fail_safes(self):
+        """Should partition if instance is marked as auto_disk_config=True and
+        virt-layer specific fail-safe checks pass.
+        """
+        self.instance_values['auto_disk_config'] = True
+
+        def fake_get_partitions(dev):
+            return [(1, 0, 100, 'ext4', "", "boot")]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)
+
+        self.assertIsPartitionCalled(True)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIGenerateLocal(stubs.XenAPITestBase):
+    """Test generating of local disks, like swap and ephemeral."""
+    def setUp(self):
+        super(XenAPIGenerateLocal, self).setUp()
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+                   'Dom0IptablesFirewallDriver')
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        db_fakes.stub_out_db_instance_api(self.stubs)
+        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+
+        self.instance_values = {'id': 1,
+                  'project_id': self.project_id,
+                  'user_id': self.user_id,
+                  'image_ref': 1,
+                  'kernel_id': 2,
+                  'ramdisk_id': 3,
+                  'root_gb': 80,
+                  'ephemeral_gb': 0,
+                  'instance_type_id': '3',  # m1.large
+                  'os_type': 'linux',
+                  'architecture': 'x86-64'}
+
+        self.context = context.RequestContext(self.user_id, self.project_id)
+
+        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
+                            vbd_type='disk', read_only=False, bootable=True,
+                            osvol=False, empty=False, unpluggable=True):
+            return session.call_xenapi('VBD.create', {'VM': vm_ref,
+                                                      'VDI': vdi_ref})
+
+        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
+
+    def assertCalled(self, instance,
+                     disk_image_type=vm_utils.ImageType.DISK_VHD):
+        """Attach disks and assert a generate_* stub set self.called."""
+        context.RequestContext(self.user_id, self.project_id)
+        session = get_session()
+
+        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+
+        vdi_key = 'root'
+        if disk_image_type == vm_utils.ImageType.DISK_ISO:
+            vdi_key = 'iso'
+        vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+        self.called = False
+        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
+                                       vdis, disk_image_type, "fake_nw_inf")
+        self.assertTrue(self.called)
+
+    def test_generate_swap(self):
+        # Test swap disk generation.
+        instance_values = dict(self.instance_values, instance_type_id=5)
+        instance = create_instance_with_system_metadata(self.context,
+                                                        instance_values)
+
+        def fake_generate_swap(*args, **kwargs):
+            self.called = True
+        self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
+
+        self.assertCalled(instance)
+
+    def test_generate_ephemeral(self):
+        # Test ephemeral disk generation.
+        instance_values = dict(self.instance_values, instance_type_id=4)
+        instance = create_instance_with_system_metadata(self.context,
+                                                        instance_values)
+
+        def fake_generate_ephemeral(*args):
+            self.called = True
+        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+        self.assertCalled(instance)
+
+    def test_generate_iso_blank_root_disk(self):
+        # Booting from ISO with no kernel/ramdisk must generate a blank
+        # root disk alongside the ISO.
+        instance_values = dict(self.instance_values, instance_type_id=4)
+        instance_values.pop('kernel_id')
+        instance_values.pop('ramdisk_id')
+        instance = create_instance_with_system_metadata(self.context,
+                                                        instance_values)
+
+        def fake_generate_ephemeral(*args):
+            pass
+        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+        def fake_generate_iso(*args):
+            self.called = True
+        self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
+                       fake_generate_iso)
+
+        self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
+
+
+class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
+    """Tests bandwidth-counter collection across VMs and their VIFs."""
+
+    # Two fake VMs, each with two VIFs; domid links them to the
+    # per-domain bandwidth data returned by _fake_fetch_bandwidth.
+    FAKE_VMS = {'test1:ref': dict(name_label='test1',
+                                  other_config=dict(nova_uuid='hash'),
+                                  domid='12',
+                                  _vifmap={'0': "a:b:c:d...",
+                                           '1': "e:f:12:q..."}),
+                'test2:ref': dict(name_label='test2',
+                                  other_config=dict(nova_uuid='hash'),
+                                  domid='42',
+                                  _vifmap={'0': "a:3:c:d...",
+                                           '1': "e:f:42:q..."}),
+                }
+
+    def setUp(self):
+        super(XenAPIBWCountersTestCase, self).setUp()
+        self.stubs.Set(vm_utils, 'list_vms',
+                       XenAPIBWCountersTestCase._fake_list_vms)
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+                   'Dom0IptablesFirewallDriver')
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+        def _fake_get_vif_device_map(vm_rec):
+            return vm_rec['_vifmap']
+
+        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
+                       _fake_get_vif_device_map)
+
+    @classmethod
+    def _fake_list_vms(cls, session):
+        # Python 2 iteritems(); mirrors vm_utils.list_vms' iterator shape.
+        return cls.FAKE_VMS.iteritems()
+
+    @staticmethod
+    def _fake_fetch_bandwidth_mt(session):
+        # Empty result: simulates XenServer returning no data (bug #910045).
+        return {}
+
+    @staticmethod
+    def _fake_fetch_bandwidth(session):
+        # Keyed by domid, then by VIF device number.
+        return {'42':
+                    {'0': {'bw_in': 21024, 'bw_out': 22048},
+                     '1': {'bw_in': 231337, 'bw_out': 221212121}},
+                '12':
+                    {'0': {'bw_in': 1024, 'bw_out': 2048},
+                     '1': {'bw_in': 31337, 'bw_out': 21212121}},
+                }
+
+    def test_get_all_bw_counters(self):
+        instances = [dict(name='test1', uuid='1-2-3'),
+                     dict(name='test2', uuid='4-5-6')]
+
+        self.stubs.Set(vm_utils, 'fetch_bandwidth',
+                       self._fake_fetch_bandwidth)
+        result = self.conn.get_all_bw_counters(instances)
+        # One counter entry per VIF: 2 instances x 2 VIFs.
+        self.assertEqual(len(result), 4)
+        self.assertIn(dict(uuid='1-2-3',
+                           mac_address="a:b:c:d...",
+                           bw_in=1024,
+                           bw_out=2048), result)
+        self.assertIn(dict(uuid='1-2-3',
+                           mac_address="e:f:12:q...",
+                           bw_in=31337,
+                           bw_out=21212121), result)
+
+        self.assertIn(dict(uuid='4-5-6',
+                           mac_address="a:3:c:d...",
+                           bw_in=21024,
+                           bw_out=22048), result)
+        self.assertIn(dict(uuid='4-5-6',
+                           mac_address="e:f:42:q...",
+                           bw_in=231337,
+                           bw_out=221212121), result)
+
+    def test_get_all_bw_counters_in_failure_case(self):
+        """Test that get_all_bw_conters returns an empty list when
+        no data returned from Xenserver. c.f. bug #910045.
+        """
+        instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
+
+        self.stubs.Set(vm_utils, 'fetch_bandwidth',
+                       self._fake_fetch_bandwidth_mt)
+        result = self.conn.get_all_bw_counters(instances)
+        self.assertEqual(result, [])
+
+
+# TODO(salvatore-orlando): this class and
+# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
+# share a lot of code. Consider abstracting common code in a base
+# class for firewall driver testing.
+#
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
+    """Tests the Dom0 iptables firewall driver against canned
+    iptables-save / ip6tables-save snapshots (fixtures below).
+    """
+
+    REQUIRES_LOCKING = True
+
+    # Pre-existing IPv4 ruleset the driver must preserve verbatim
+    # (non-nova rules may not be dropped by prepare/apply).
+    _in_rules = [
+      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+      '*nat',
+      ':PREROUTING ACCEPT [1170:189210]',
+      ':INPUT ACCEPT [844:71028]',
+      ':OUTPUT ACCEPT [5149:405186]',
+      ':POSTROUTING ACCEPT [5063:386098]',
+      '# Completed on Mon Dec 6 11:54:13 2010',
+      '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+      '*mangle',
+      ':INPUT ACCEPT [969615:281627771]',
+      ':FORWARD ACCEPT [0:0]',
+      ':OUTPUT ACCEPT [915599:63811649]',
+      ':nova-block-ipv4 - [0:0]',
+      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+      ',ESTABLISHED -j ACCEPT ',
+      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+      '[0:0] -A FORWARD -o virbr0 -j REJECT '
+      '--reject-with icmp-port-unreachable ',
+      '[0:0] -A FORWARD -i virbr0 -j REJECT '
+      '--reject-with icmp-port-unreachable ',
+      'COMMIT',
+      '# Completed on Mon Dec 6 11:54:13 2010',
+      '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+      '*filter',
+      ':INPUT ACCEPT [969615:281627771]',
+      ':FORWARD ACCEPT [0:0]',
+      ':OUTPUT ACCEPT [915599:63811649]',
+      ':nova-block-ipv4 - [0:0]',
+      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+      ',ESTABLISHED -j ACCEPT ',
+      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+      '[0:0] -A FORWARD -o virbr0 -j REJECT '
+      '--reject-with icmp-port-unreachable ',
+      '[0:0] -A FORWARD -i virbr0 -j REJECT '
+      '--reject-with icmp-port-unreachable ',
+      'COMMIT',
+      '# Completed on Mon Dec 6 11:54:13 2010',
+    ]
+
+    # Pre-existing IPv6 filter ruleset (same preservation contract).
+    _in6_filter_rules = [
+      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+      '*filter',
+      ':INPUT ACCEPT [349155:75810423]',
+      ':FORWARD ACCEPT [0:0]',
+      ':OUTPUT ACCEPT [349256:75777230]',
+      'COMMIT',
+      '# Completed on Tue Jan 18 23:47:56 2011',
+    ]
+
+    def setUp(self):
+        super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
+        self.flags(connection_url='test_url',
+                   connection_password='test_pass',
+                   group='xenserver')
+        self.flags(instance_name_template='%d',
+                   firewall_driver='nova.virt.xenapi.firewall.'
+                                   'Dom0IptablesFirewallDriver')
+        self.user_id = 'mappin'
+        self.project_id = 'fake'
+        # FakeSessionForFirewallTests records rules into self._out_rules.
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
+                              test_case=self)
+        self.context = context.RequestContext(self.user_id, self.project_id)
+        self.network = importutils.import_object(CONF.network_manager)
+        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+        self.fw = self.conn._vmops.firewall_driver
+
+    def _create_instance_ref(self):
+        """Create and return a minimal DB instance for firewall tests."""
+        return db.instance_create(self.context,
+                                  {'user_id': self.user_id,
+                                   'project_id': self.project_id,
+                                   'instance_type_id': 1})
+
+    def _create_test_security_group(self):
+        """Create 'testgroup' with three rules: all-ICMP, ICMP echo-request
+        (type 8), and TCP 80-81; return the security group row.
+        """
+        admin_ctxt = context.get_admin_context()
+        secgroup = db.security_group_create(admin_ctxt,
+                                {'user_id': self.user_id,
+                                 'project_id': self.project_id,
+                                 'name': 'testgroup',
+                                 'description': 'test group'})
+        db.security_group_rule_create(admin_ctxt,
+                                      {'parent_group_id': secgroup['id'],
+                                       'protocol': 'icmp',
+                                       'from_port': -1,
+                                       'to_port': -1,
+                                       'cidr': '192.168.11.0/24'})
+
+        db.security_group_rule_create(admin_ctxt,
+                                      {'parent_group_id': secgroup['id'],
+                                       'protocol': 'icmp',
+                                       'from_port': 8,
+                                       'to_port': -1,
+                                       'cidr': '192.168.11.0/24'})
+
+        db.security_group_rule_create(admin_ctxt,
+                                      {'parent_group_id': secgroup['id'],
+                                       'protocol': 'tcp',
+                                       'from_port': 80,
+                                       'to_port': 81,
+                                       'cidr': '192.168.10.0/24'})
+        return secgroup
+
+    def _validate_security_group(self):
+        """Check self._out_rules: non-nova input rules preserved, the
+        per-instance chain and its security-group chain exist, and the
+        three rules from _create_test_security_group were rendered.
+        """
+        in_rules = filter(lambda l: not l.startswith('#'),
+                          self._in_rules)
+        for rule in in_rules:
+            if 'nova' not in rule:
+                self.assertTrue(rule in self._out_rules,
+                                'Rule went missing: %s' % rule)
+
+        instance_chain = None
+        for rule in self._out_rules:
+            # This is pretty crude, but it'll do for now
+            # last two octets change
+            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
+                instance_chain = rule.split(' ')[-1]
+                break
+        self.assertTrue(instance_chain, "The instance chain wasn't added")
+        security_group_chain = None
+        for rule in self._out_rules:
+            # This is pretty crude, but it'll do for now
+            if '-A %s -j' % instance_chain in rule:
+                security_group_chain = rule.split(' ')[-1]
+                break
+        self.assertTrue(security_group_chain,
+                        "The security group chain wasn't added")
+
+        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
+                           ' -s 192.168.11.0/24')
+        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+                        "ICMP acceptance rule wasn't added")
+
+        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
+                           ' --icmp-type 8 -s 192.168.11.0/24')
+        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+                        "ICMP Echo Request acceptance rule wasn't added")
+
+        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
+                           ' -s 192.168.10.0/24')
+        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+                        "TCP port 80/81 acceptance rule wasn't added")
+
+    def test_static_filters(self):
+        """End-to-end: prepare + apply instance filters for an instance in
+        two security groups (one CIDR-based, one source-group-based) and
+        validate the generated iptables rules.
+        """
+        instance_ref = self._create_instance_ref()
+        src_instance_ref = self._create_instance_ref()
+        admin_ctxt = context.get_admin_context()
+        secgroup = self._create_test_security_group()
+
+        src_secgroup = db.security_group_create(admin_ctxt,
+                                                {'user_id': self.user_id,
+                                                 'project_id': self.project_id,
+                                                 'name': 'testsourcegroup',
+                                                 'description': 'src group'})
+        db.security_group_rule_create(admin_ctxt,
+                                      {'parent_group_id': secgroup['id'],
+                                       'protocol': 'tcp',
+                                       'from_port': 80,
+                                       'to_port': 81,
+                                       'group_id': src_secgroup['id']})
+
+        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+                                       secgroup['id'])
+        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
+                                       src_secgroup['id'])
+        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
+
+        network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+
+        from nova.compute import utils as compute_utils  # noqa
+        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
+                       lambda instance: network_model)
+
+        self.fw.prepare_instance_filter(instance_ref, network_model)
+        self.fw.apply_instance_filter(instance_ref, network_model)
+
+        self._validate_security_group()
+        # Extra test for TCP acceptance rules
+        for ip in network_model.fixed_ips():
+            if ip['version'] != 4:
+                continue
+            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
+                               ' --dport 80:81 -s %s' % ip['address'])
+            self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+                            "TCP port 80/81 acceptance rule wasn't added")
+
+        db.instance_destroy(admin_ctxt, instance_ref['uuid'])
+
+    def test_filters_for_instance_with_ip_v6(self):
+        """With use_ipv6 enabled, IPv6 filter rules are produced."""
+        self.flags(use_ipv6=True)
+        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+        self.assertEqual(len(rulesv4), 2)
+        self.assertEqual(len(rulesv6), 1)
+
+    def test_filters_for_instance_without_ip_v6(self):
+        """With use_ipv6 disabled, no IPv6 filter rules are produced."""
+        self.flags(use_ipv6=False)
+        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+        self.assertEqual(len(rulesv4), 2)
+        self.assertEqual(len(rulesv6), 0)
+
+    def test_multinic_iptables(self):
+        """Rule counts scale with networks x addresses; +2 extra IPv4
+        rules come from the DHCP server entry injected below.
+        """
+        ipv4_rules_per_addr = 1
+        ipv4_addr_per_network = 2
+        ipv6_rules_per_addr = 1
+        ipv6_addr_per_network = 1
+        networks_count = 5
+        instance_ref = self._create_instance_ref()
+        _get_instance_nw_info = fake_network.fake_get_instance_nw_info
+        network_info = _get_instance_nw_info(self.stubs,
+                                             networks_count,
+                                             ipv4_addr_per_network)
+        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+            '1.1.1.1'
+        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+                                                      network_info)
+        self.fw.prepare_instance_filter(instance_ref, network_info)
+        ipv4 = self.fw.iptables.ipv4['filter'].rules
+        ipv6 = self.fw.iptables.ipv6['filter'].rules
+        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+        # Extra rules are for the DHCP request
+        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+                 networks_count) + 2
+        self.assertEqual(ipv4_network_rules, rules)
+        self.assertEqual(ipv6_network_rules,
+                         ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
+    def test_do_refresh_security_group_rules(self):
+        """A rule added to a security group after prepare must appear in
+        the output once refresh_security_group_rules runs.
+        """
+        admin_ctxt = context.get_admin_context()
+        instance_ref = self._create_instance_ref()
+        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+        secgroup = self._create_test_security_group()
+        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+                                       secgroup['id'])
+        self.fw.prepare_instance_filter(instance_ref, network_info)
+        self.fw.instance_info[instance_ref['id']] = (instance_ref,
+                                                     network_info)
+        self._validate_security_group()
+        # add a rule to the security group
+        db.security_group_rule_create(admin_ctxt,
+                                      {'parent_group_id': secgroup['id'],
+                                       'protocol': 'udp',
+                                       'from_port': 200,
+                                       'to_port': 299,
+                                       'cidr': '192.168.99.0/24'})
+        # validate the extra rule
+        self.fw.refresh_security_group_rules(secgroup)
+        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
+                           ' -s 192.168.99.0/24')
+        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+                        "Rules were not updated properly."
+                        "The rule for UDP acceptance is missing")
+
    def test_provider_firewall_rules(self):
        """Provider (admin-defined) firewall rules go into the shared
        'provider' chain, and each instance chain gets exactly one jump
        rule into it; removing a DB rule shrinks the chain on refresh.
        """
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: as in libvirt tests
        # peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
        # No provider rules exist yet, so the chain starts empty.
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        db.provider_fw_rule_create(admin_ctxt,
                                   {'protocol': 'tcp',
                                    'cidr': '10.99.99.99/32',
                                    'from_port': 1,
                                    'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
+
+
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for testing we find the right SR."""
    def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raise when wrong filter.
        self.flags(sr_matching_filter='yadayadayada', group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          vm_utils.safe_find_sr, session)

    def test_safe_find_sr_local_storage(self):
        # Ensure the default local-storage is found.
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        # This test is only guaranteed if there is one host in the pool
        self.assertEqual(len(xenapi_fake.get_all('host')), 1)
        host_ref = xenapi_fake.get_all('host')[0]
        pbd_refs = xenapi_fake.get_all('PBD')
        # Walk the PBDs of our host to locate the SR tagged 'local-storage'.
        # NOTE(review): local_sr would be unbound (NameError) if the fake
        # session ever stopped providing a local-storage SR — presumably the
        # fixture guarantees one; verify if the fixture changes.
        for pbd_ref in pbd_refs:
            pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
            if pbd_rec['host'] != host_ref:
                continue
            sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
            if sr_rec['other_config']['i18n-key'] == 'local-storage':
                local_sr = pbd_rec['SR']
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, expected)

    def test_safe_find_sr_by_other_criteria(self):
        # Ensure the SR is found when using a different filter.
        self.flags(sr_matching_filter='other-config:my_fake_sr=true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        host_ref = xenapi_fake.get_all('host')[0]
        # Create an SR carrying the custom other-config key the filter uses.
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, expected)

    def test_safe_find_sr_default(self):
        # Ensure the default SR is found regardless of other-config.
        self.flags(sr_matching_filter='default-sr:true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        pool_ref = session.call_xenapi('pool.get_all')[0]
        expected = vm_utils.safe_find_sr(session)
        # safe_find_sr must return the pool's configured default SR.
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         expected)
+
+
def _create_service_entries(context, values=None):
    """Create nova-compute service DB entries for each listed host.

    :param context: request context used for the DB calls
    :param values: mapping of availability zone name -> list of host names;
                   defaults to two zones with three hosts in total
    :returns: the mapping that was used
    """
    # Build the default inside the function: a mutable default argument
    # would be a single dict object shared across every call.
    if values is None:
        values = {'avail_zone1': ['fake_host1', 'fake_host2'],
                  'avail_zone2': ['fake_host3']}
    for avail_zone, hosts in values.iteritems():
        for service_host in hosts:
            db.service_create(context,
                              {'host': service_host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0})
    return values
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
    """Unit tests for aggregate operations."""
    def setUp(self):
        super(XenAPIAggregateTestCase, self).setUp()
        self.flags(connection_url='http://test_url',
                   connection_username='test_user',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host',
                   compute_driver='xenapi.XenAPIDriver',
                   default_availability_zone='avail_zone1')
        self.flags(use_local=True, group='conductor')
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.compute = importutils.import_object(CONF.compute_manager)
        self.api = compute_api.AggregateAPI()
        values = {'name': 'test_aggr',
                  'metadata': {'availability_zone': 'test_zone',
                               pool_states.POOL_FLAG: 'XenAPI'}}
        self.aggr = db.aggregate_create(self.context, values)
        # Metadata describing an active pool whose master is this host.
        self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
                              'master_compute': 'host',
                              'availability_zone': 'fake_zone',
                              pool_states.KEY: pool_states.ACTIVE,
                              'host': xenapi_fake.get_record('host',
                                                             host_ref)['uuid']}

    def test_pool_add_to_aggregate_called_by_driver(self):
        """Driver add_to_aggregate delegates to the pool with all args."""
        calls = []

        def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
            self.assertEqual("CONTEXT", context)
            self.assertEqual("AGGREGATE", aggregate)
            self.assertEqual("HOST", host)
            self.assertEqual("SLAVEINFO", slave_info)
            calls.append(pool_add_to_aggregate)
        self.stubs.Set(self.conn._pool,
                       "add_to_aggregate",
                       pool_add_to_aggregate)

        self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                   slave_info="SLAVEINFO")

        self.assertIn(pool_add_to_aggregate, calls)

    def test_pool_remove_from_aggregate_called_by_driver(self):
        """Driver remove_from_aggregate delegates to the pool with all
        args.
        """
        calls = []

        def pool_remove_from_aggregate(context, aggregate, host,
                                       slave_info=None):
            self.assertEqual("CONTEXT", context)
            self.assertEqual("AGGREGATE", aggregate)
            self.assertEqual("HOST", host)
            self.assertEqual("SLAVEINFO", slave_info)
            calls.append(pool_remove_from_aggregate)
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       pool_remove_from_aggregate)

        self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                        slave_info="SLAVEINFO")

        self.assertIn(pool_remove_from_aggregate, calls)

    def test_add_to_aggregate_for_first_host_sets_metadata(self):
        def fake_init_pool(id, name):
            fake_init_pool.called = True
        self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)

        aggregate = self._aggregate_setup()
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_init_pool.called)
        self.assertThat(self.fake_metadata,
                        matchers.DictMatches(result['metadetails']))

    def test_join_slave(self):
        # Ensure join_slave gets called when the request gets to master.
        def fake_join_slave(id, compute_uuid, host, url, user, password):
            fake_join_slave.called = True
        self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)

        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
                                         dict(compute_uuid='fake_uuid',
                                              url='fake_url',
                                              user='fake_user',
                                              passwd='fake_pass',
                                              xenhost_uuid='fake_uuid'))
        self.assertTrue(fake_join_slave.called)

    def test_add_to_aggregate_first_host(self):
        def fake_pool_set_name_label(self, session, pool_ref, name):
            fake_pool_set_name_label.called = True
        self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
                       fake_pool_set_name_label)
        self.conn._session.call_xenapi("pool.create", {"name": "asdf"})

        metadata = {'availability_zone': 'fake_zone',
                    pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.CREATED}

        aggregate = objects.Aggregate()
        aggregate.name = 'fake_aggregate'
        aggregate.metadata = dict(metadata)
        aggregate.create(self.context)
        aggregate.add_host('host')
        self.assertEqual(["host"], aggregate.hosts)
        self.assertEqual(metadata, aggregate.metadata)

        # Adding the first host should name the pool after the aggregate.
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        self.assertTrue(fake_pool_set_name_label.called)

    def test_remove_from_aggregate_called(self):
        def fake_remove_from_aggregate(context, aggregate, host):
            fake_remove_from_aggregate.called = True
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       fake_remove_from_aggregate)

        self.conn.remove_from_aggregate(None, None, None)
        self.assertTrue(fake_remove_from_aggregate.called)

    def test_remove_from_empty_aggregate(self):
        result = self._aggregate_setup()
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, result, "test_host")

    def test_remove_slave(self):
        # Ensure eject slave gets called.
        def fake_eject_slave(id, compute_uuid, host_uuid):
            fake_eject_slave.called = True
        self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)

        self.fake_metadata['host2'] = 'fake_host2_uuid'
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
        self.assertTrue(fake_eject_slave.called)

    def test_remove_master_solo(self):
        # Ensure metadata are cleared after removal.
        def fake_clear_pool(id):
            fake_clear_pool.called = True
        self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)

        aggregate = self._aggregate_setup(metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_clear_pool.called)
        # Pool-specific keys (master_compute, host) must be gone.
        self.assertThat({'availability_zone': 'fake_zone',
                pool_states.POOL_FLAG: 'XenAPI',
                pool_states.KEY: pool_states.ACTIVE},
                matchers.DictMatches(result['metadetails']))

    def test_remote_master_non_empty_pool(self):
        # Ensure AggregateError is raised if removing the master.
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)

        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, aggregate, "host")

    def _aggregate_setup(self, aggr_name='fake_aggregate',
                         aggr_zone='fake_zone',
                         aggr_state=pool_states.CREATED,
                         hosts=None, metadata=None):
        """Create and return an Aggregate object in the given state.

        ``hosts`` defaults to ['host']; the default is built here rather
        than in the signature so the list is never shared between calls.
        """
        if hosts is None:
            hosts = ['host']
        aggregate = objects.Aggregate()
        aggregate.name = aggr_name
        aggregate.metadata = {'availability_zone': aggr_zone,
                              pool_states.POOL_FLAG: 'XenAPI',
                              pool_states.KEY: aggr_state,
                              }
        if metadata:
            aggregate.metadata.update(metadata)
        aggregate.create(self.context)
        for aggregate_host in hosts:
            aggregate.add_host(aggregate_host)
        return aggregate

    def test_add_host_to_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateAction is raised when adding host while
        aggregate is not ready.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        ex = self.assertRaises(exception.InvalidAggregateAction,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'host')
        self.assertIn('setup in progress', str(ex))

    def test_add_host_to_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        ex = self.assertRaises(exception.InvalidAggregateAction,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'fake_host')
        self.assertIn('aggregate deleted', str(ex))

    def test_add_host_to_aggregate_invalid_error_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        in error.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
        ex = self.assertRaises(exception.InvalidAggregateAction,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'fake_host')
        self.assertIn('aggregate in error', str(ex))

    def test_remove_host_from_aggregate_error(self):
        # Ensure we can remove a host from an aggregate even if in error.
        values = _create_service_entries(self.context)
        fake_zone = values.keys()[0]
        aggr = self.api.create_aggregate(self.context,
                                         'fake_aggregate', fake_zone)
        # let's mock the fact that the aggregate is ready!
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        db.aggregate_metadata_add(self.context, aggr['id'], metadata)
        for aggregate_host in values[fake_zone]:
            aggr = self.api.add_host_to_aggregate(self.context,
                                                  aggr['id'], aggregate_host)
        # let's mock the fact that the aggregate is in error!
        expected = self.api.remove_host_from_aggregate(self.context,
                                                       aggr['id'],
                                                       values[fake_zone][0])
        self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
        self.assertEqual(expected['metadata'][pool_states.KEY],
                         pool_states.ACTIVE)

    def test_remove_host_from_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')

    def test_remove_host_from_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        changing.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')

    def test_add_aggregate_host_raise_err(self):
        # Ensure the undo operation works correctly on add.
        def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
            raise exception.AggregateError(
                    aggregate_id='', action='', reason='')
        self.stubs.Set(self.compute.driver, "add_to_aggregate",
                       fake_driver_add_to_aggregate)
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
        db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')

        self.assertRaises(exception.AggregateError,
                          self.compute.add_aggregate_host,
                          self.context, host="fake_host",
                          aggregate=jsonutils.to_primitive(self.aggr),
                          slave_info=None)
        # After the failed add the aggregate must be flagged ERROR and
        # the host rolled back out.  (Renamed from the 'excepted' typo.)
        expected = db.aggregate_get(self.context, self.aggr['id'])
        self.assertEqual(expected['metadetails'][pool_states.KEY],
                         pool_states.ERROR)
        self.assertEqual(expected['hosts'], [])
+
+
class MockComputeAPI(object):
    """Records compute RPC API calls so tests can assert on them.

    Each recorded entry is a tuple of (bound method, *call arguments),
    appended to ``_mock_calls`` in call order.
    """

    def __init__(self):
        self._mock_calls = []

    def _record(self, *call):
        # One shared recorder keeps both RPC stubs identical in shape.
        self._mock_calls.append(call)

    def add_aggregate_host(self, ctxt, aggregate,
                           host_param, host, slave_info):
        self._record(self.add_aggregate_host, ctxt, aggregate,
                     host_param, host, slave_info)

    def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
                              host, slave_info):
        self._record(self.remove_aggregate_host, ctxt, aggregate_id,
                     host_param, host, slave_info)
+
+
class StubDependencies(object):
    """Stub dependencies for ResourcePool."""

    def __init__(self):
        # Recording RPC API double; tests inspect its _mock_calls.
        self.compute_rpcapi = MockComputeAPI()

    def _is_hv_pool(self, *_ignore):
        # Pretend every aggregate is a hypervisor pool.
        return True

    def _get_metadata(self, *_ignore):
        # Minimal pool metadata: this node is a slave of 'master'.
        return {
            pool_states.KEY: {},
            'master_compute': 'master'
        }

    def _create_slave_info(self, *ignore):
        # Canned slave-info payload forwarded over RPC.
        return "SLAVE_INFO"
+
+
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
    """A ResourcePool whose external collaborators are stubbed out.

    StubDependencies is listed first so its methods override the real
    implementations in pool.ResourcePool.
    """
+
+
class HypervisorPoolTestCase(test.NoDBTestCase):
    """A slave compute node must forward pool add/remove requests to the
    master over the compute RPC API rather than acting locally.
    """

    # Aggregate whose master is a different host ('master'), so this
    # node acts as a slave.
    fake_aggregate = {
        'id': 98,
        'hosts': [],
        'metadata': {
            'master_compute': 'master',
            pool_states.POOL_FLAG: {},
            pool_states.KEY: {}
        }
    }

    def test_slave_asks_master_to_add_slave_to_pool(self):
        slave = ResourcePoolWithStubs()

        slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")

        # The request must have been recorded as an RPC to the master.
        self.assertIn(
            (slave.compute_rpcapi.add_aggregate_host,
            "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
            "slave", "master", "SLAVE_INFO"),
            slave.compute_rpcapi._mock_calls)

    def test_slave_asks_master_to_remove_slave_from_pool(self):
        slave = ResourcePoolWithStubs()

        slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")

        # Removal is requested by aggregate id (98), not the full dict.
        self.assertIn(
            (slave.compute_rpcapi.remove_aggregate_host,
             "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
            slave.compute_rpcapi._mock_calls)
+
+
class SwapXapiHostTestCase(test.NoDBTestCase):
    """swap_xapi_host() must replace only the hostname component of a
    XenAPI URL, leaving scheme, port and path untouched.
    """

    def _assert_swapped(self, expected, url):
        # All cases swap in the same replacement host.
        self.assertEqual(expected, pool.swap_xapi_host(url, 'otherserver'))

    def test_swapping(self):
        self._assert_swapped("http://otherserver:8765/somepath",
                             "http://someserver:8765/somepath")

    def test_no_port(self):
        self._assert_swapped("http://otherserver/somepath",
                             "http://someserver/somepath")

    def test_no_path(self):
        self._assert_swapped("http://otherserver",
                             "http://someserver")
+
+
+class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for live_migration."""
    def setUp(self):
        """Point the driver at a fake XenAPI endpoint and stub the
        instance DB API.
        """
        super(XenAPILiveMigrateTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host')
        db_fakes.stub_out_db_instance_api(self.stubs)
        self.context = context.get_admin_context()
+
    def test_live_migration_calls_vmops(self):
        """Driver live_migration delegates to vmops.live_migrate."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_live_migrate(context, instance_ref, dest, post_method,
                              recover_method, block_migration, migrate_data):
            fake_live_migrate.called = True

        self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)

        self.conn.live_migration(None, None, None, None, None)
        self.assertTrue(fake_live_migrate.called)
+
    def test_pre_live_migration(self):
        """pre_live_migration exists and accepts the expected arity."""
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.conn.pre_live_migration(None, None, None, None, None)
+
    def test_post_live_migration_at_destination(self):
        """At the destination the driver must re-apply all three firewall
        steps, look up the migrated VM and strip base-mirror VDIs.
        """
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        fake_instance = {"name": "name"}
        fake_network_info = "network_info"

        # One stub shared by all three firewall hooks; counts invocations.
        def fake_fw(instance, network_info):
            self.assertEqual(instance, fake_instance)
            self.assertEqual(network_info, fake_network_info)
            fake_fw.call_count += 1

        def fake_create_kernel_and_ramdisk(context, session, instance,
                                           name_label):
            return "fake-kernel-file", "fake-ramdisk-file"

        fake_fw.call_count = 0
        _vmops = self.conn._vmops
        self.stubs.Set(_vmops.firewall_driver,
                       'setup_basic_filtering', fake_fw)
        self.stubs.Set(_vmops.firewall_driver,
                       'prepare_instance_filter', fake_fw)
        self.stubs.Set(_vmops.firewall_driver,
                       'apply_instance_filter', fake_fw)
        self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
                       fake_create_kernel_and_ramdisk)

        def fake_get_vm_opaque_ref(instance):
            fake_get_vm_opaque_ref.called = True
        self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
        fake_get_vm_opaque_ref.called = False

        def fake_strip_base_mirror_from_vdis(session, vm_ref):
            fake_strip_base_mirror_from_vdis.called = True
        self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
                       fake_strip_base_mirror_from_vdis)
        fake_strip_base_mirror_from_vdis.called = False

        self.conn.post_live_migration_at_destination(None, fake_instance,
                                                     fake_network_info, None)
        # setup_basic_filtering + prepare + apply = 3 firewall calls.
        self.assertEqual(fake_fw.call_count, 3)
        self.assertTrue(fake_get_vm_opaque_ref.called)
        self.assertTrue(fake_strip_base_mirror_from_vdis.called)
+
    def test_check_can_live_migrate_destination_with_block_migration(self):
        """Block migration dest-check returns the local SR ref and the
        session's migrate-send data.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")

        expected = {'block_migration': True,
                    'migrate_data': {
                        'migrate_send_data': "fake_migrate_data",
                        'destination_sr_ref': 'asdf'
                    }
                    }
        result = self.conn.check_can_live_migrate_destination(self.context,
                                                              {'host': 'host'},
                                                              {}, {},
                                                              True, False)
        self.assertEqual(expected, result)
+
    def test_check_live_migrate_destination_verifies_ip(self):
        """Dest-check must fail when no PIF on the host has an IP
        configured.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        # Blank out every PIF address so the IP check cannot pass.
        for pif_ref in xenapi_fake.get_all('PIF'):
            pif_rec = xenapi_fake.get_record('PIF', pif_ref)
            pif_rec['IP'] = ''
            pif_rec['IPv6'] = ''

        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")

        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)
+
    def test_check_can_live_migrate_destination_block_migration_fails(self):
        """A failing XenAPI session surfaces as MigrationError."""
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)
+
    def _add_default_live_migrate_stubs(self, conn):
        """Stub out the vmops/vm_utils helpers the live-migration code
        path touches, with benign defaults (no iSCSI SRs, a fixed VM ref
        and PV kernel/ramdisk pair).
        """
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            pass

        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return []

        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"

        def fake_lookup_kernel_ramdisk(session, vm):
            return ("fake_PV_kernel", "fake_PV_ramdisk")

        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        self.stubs.Set(conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)
+
    def test_check_can_live_migrate_source_with_block_migrate(self):
        """Source-check passes dest_check_data through unchanged for a
        plain block migration.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                            'destination_sr_ref': None,
                            'migrate_send_data': None
                           }}
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        self.assertEqual(dest_check_data, result)
+
    def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
        """With iSCSI SRs present, source-check still succeeds when the
        dom0 plugin reports support ("true").
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)

        # Plugin answers "true": iSCSI-backed block migration supported.
        def fake_make_plugin_call(plugin, method, **args):
            return "true"
        self.stubs.Set(self.conn._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                            'destination_sr_ref': None,
                            'migrate_send_data': None
                           }}
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        self.assertEqual(dest_check_data, result)
+
    def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
        """With iSCSI SRs present but the dom0 plugin missing, the
        source-check raises MigrationError.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)

        # Plugin call fails: error return instead of "true".
        def fake_make_plugin_call(plugin, method, **args):
            return {'returncode': 'error', 'message': 'Plugin not found'}
        self.stubs.Set(self.conn._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context, {'host': 'host'},
                          {})
+
    def test_check_can_live_migrate_source_with_block_migrate_fails(self):
        """A failing XenAPI session makes the source-check raise
        MigrationError.
        """
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                            'destination_sr_ref': None,
                            'migrate_send_data': None
                           }}
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context,
                          {'host': 'host'},
                          dest_check_data)
+
    def test_check_can_live_migrate_works(self):
        """Non-block dest-check succeeds when the host belongs to an
        aggregate carrying a 'host' metadata entry.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_aggregate_get_by_host(context, host, key=None):
            self.assertEqual(CONF.host, host)
            return [dict(test_aggregate.fake_aggregate,
                         metadetails={"host": "test_host_uuid"})]

        self.stubs.Set(db, "aggregate_get_by_host",
                      fake_aggregate_get_by_host)
        self.conn.check_can_live_migrate_destination(self.context,
                {'host': 'host'}, False, False)
+
    def test_check_can_live_migrate_fails(self):
        """Non-block dest-check raises MigrationError when the aggregate
        metadata lacks the required 'host' key.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_aggregate_get_by_host(context, host, key=None):
            self.assertEqual(CONF.host, host)
            return [dict(test_aggregate.fake_aggregate,
                         metadetails={"dest_other": "test_host_uuid"})]

        self.stubs.Set(db, "aggregate_get_by_host",
                      fake_aggregate_get_by_host)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'}, None, None)
+
    def test_live_migration(self):
        """A successful (non-block) live migration invokes the caller's
        post_method callback.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)

        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)

        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True

        self.conn.live_migration(self.conn, None, None, post_method, None)

        self.assertTrue(post_method.called, "post_method.called")
+
    def test_live_migration_on_failure(self):
        """When the XenAPI call blows up mid-migration the recover_method
        callback runs and the original exception propagates.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)

        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)

        # Any session call fails, simulating a mid-flight error.
        def fake_call_xenapi(*args):
            raise NotImplementedError()
        self.stubs.Set(self.conn._vmops._session, "call_xenapi",
                       fake_call_xenapi)

        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True

        self.assertRaises(NotImplementedError, self.conn.live_migration,
                          self.conn, None, None, None, recover_method)
        self.assertTrue(recover_method.called, "recover_method.called")
+
    def test_live_migration_calls_post_migration(self):
        """Block migration with valid migrate_data still ends by calling
        post_method.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True

        # pass block_migration = True and migrate data
        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")
+
    def test_live_migration_block_cleans_srs(self):
        """After a successful block migration the iSCSI SRs attached to
        the instance are forgotten on the source.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def fake_get_iscsi_srs(context, instance):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)

        def fake_forget_sr(context, instance):
            fake_forget_sr.called = True
        self.stubs.Set(volume_utils, "forget_sr",
                       fake_forget_sr)

        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True

        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)

        self.assertTrue(post_method.called, "post_method.called")
        self.assertTrue(fake_forget_sr.called, "forget_sr.called")
+
    def test_live_migration_with_block_migration_raises_invalid_param(self):
        """Block migration without migrate_data is rejected with
        InvalidParameterValue, and recover_method is invoked.
        """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and no migrate data
        self.assertRaises(exception.InvalidParameterValue,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, None)
        self.assertTrue(recover_method.called, "recover_method.called")
+
    def test_live_migration_with_block_migration_fails_migrate_send(self):
        """When migrate_send fails, MigrationError is raised and
        recover_method is invoked.
        """
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and migrate data
        migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
        self.assertRaises(exception.MigrationError,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, migrate_data)
        self.assertTrue(recover_method.called, "recover_method.called")
+
    def test_live_migrate_block_migration_xapi_call_parameters(self):
        """VM.migrate_send receives the caller's migrate_send_data and the
        VDI map produced by _generate_vdi_map.
        """
        fake_vdi_map = object()

        class Session(xenapi_fake.SessionBase):
            # self_ is the fake session; self is the enclosing test case.
            def VM_migrate_send(self_, session, vmref, migrate_data, islive,
                                vdi_map, vif_map, options):
                self.assertEqual('SOMEDATA', migrate_data)
                self.assertEqual(fake_vdi_map, vdi_map)

        stubs.stubout_session(self.stubs, Session)

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(conn)

        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            return fake_vdi_map

        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)

        def dummy_callback(*args, **kwargs):
            pass

        conn.live_migration(
            self.context, instance=dict(name='ignore'), dest=None,
            post_method=dummy_callback, recover_method=dummy_callback,
            block_migration="SOMEDATA",
            migrate_data=dict(migrate_send_data='SOMEDATA',
                              destination_sr_ref="TARGET_SR_OPAQUE_REF"))
+
def test_live_migrate_pool_migration_xapi_call_parameters(self):
    """Non-block (pool) live migration must call VM.pool_migrate with
    the resolved destination host ref and the live option enabled.
    """
    class Session(xenapi_fake.SessionBase):
        def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
            self.assertEqual("fake_ref", host_ref)
            self.assertEqual({"live": "true"}, options)
            # Abort with a recognizable error once the parameters
            # have been verified.
            raise IOError()

    stubs.stubout_session(self.stubs, Session)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
    self._add_default_live_migrate_stubs(conn)

    # Resolve any destination to the sentinel host ref.
    self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
                   lambda context, destination: "fake_ref")

    def noop(*args, **kwargs):
        pass

    self.assertRaises(IOError, conn.live_migration,
                      self.context, instance=dict(name='ignore'), dest=None,
                      post_method=noop, recover_method=noop,
                      block_migration=False, migrate_data={})
+
def test_generate_vdi_map(self):
    """_generate_vdi_map must map every VDI found on the source SR to
    the destination SR ref.
    """
    stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

    vm_ref = "fake_vm_ref"

    def fake_find_sr(_session):
        self.assertEqual(conn._session, _session)
        return "source_sr_ref"

    def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
        self.assertEqual(conn._session, _session)
        self.assertEqual(vm_ref, _vm_ref)
        self.assertEqual("source_sr_ref", _sr_ref)
        return ["vdi0", "vdi1"]

    self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
    self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
                   fake_get_instance_vdis_for_sr)

    result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)

    self.assertEqual({"vdi0": "dest_sr_ref", "vdi1": "dest_sr_ref"},
                     result)
+
def test_rollback_live_migration_at_destination(self):
    """Rolling back on the destination must never destroy the instance."""
    stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
    conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

    with mock.patch.object(conn, "destroy") as destroy_mock:
        conn.rollback_live_migration_at_destination(
            "context", "instance", [], None)
        self.assertFalse(destroy_mock.called)
+
+
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for injecting and updating instance metadata in the
    XenStore through VMOps.

    All xenstore accesses are stubbed out into ``self.xenstore``:
    ``persist`` mirrors the param-xenstore (survives reboots) and
    ``ephem`` mirrors direct xenstore writes.
    """

    def setUp(self):
        super(XenAPIInjectMetadataTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        # In-memory stand-in for the two xenstore backends.
        self.xenstore = dict(persist={}, ephem={})

        self.called_fake_get_vm_opaque_ref = False

        def fake_get_vm_opaque_ref(inst, instance):
            # Record the lookup; simulate a missing VM for the
            # 'not_found' uuid, otherwise expect the canned instance.
            self.called_fake_get_vm_opaque_ref = True
            if instance["uuid"] == "not_found":
                raise exception.NotFound
            self.assertEqual(instance, {'uuid': 'fake'})
            return 'vm_ref'

        def fake_add_to_param_xenstore(inst, vm_ref, key, val):
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['persist'][key] = val

        def fake_remove_from_param_xenstore(inst, vm_ref, key):
            self.assertEqual(vm_ref, 'vm_ref')
            if key in self.xenstore['persist']:
                del self.xenstore['persist'][key]

        def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            # Live xenstore writes are JSON-serialized by VMOps.
            self.xenstore['ephem'][path] = jsonutils.dumps(value)

        def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            if path in self.xenstore['ephem']:
                del self.xenstore['ephem'][path]

        self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
                       fake_add_to_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
                       fake_remove_from_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
                       fake_write_to_xenstore)
        self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
                       fake_delete_from_xenstore)

    def test_inject_instance_metadata(self):
        """User metadata lands in the param xenstore with sanitized keys;
        system_metadata is excluded entirely.
        """
        # Add some system_metadata to ensure it doesn't get added
        # to xenstore
        instance = dict(metadata=[{'key': 'a', 'value': 1},
                                  {'key': 'b', 'value': 2},
                                  {'key': 'c', 'value': 3},
                                  # Check xenstore key sanitizing
                                  {'key': 'hi.there', 'value': 4},
                                  {'key': 'hi!t.e/e', 'value': 5}],
                        # system_metadata must not be propagated to
                        # the xenstore
                        system_metadata=[{'key': 'sys_a', 'value': 1},
                                         {'key': 'sys_b', 'value': 2},
                                         {'key': 'sys_c', 'value': 3}],
                        uuid='fake')
        self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/hi_there': '4',
                'vm-data/user-metadata/hi_t_e_e': '5',
                },
            'ephem': {},
            })

    def test_change_instance_metadata_add(self):
        """A '+' diff for a new key adds it to both xenstore backends."""
        # Test XenStore key sanitizing here, too.
        diff = {'test.key': ['+', 4]}
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
                },
            })

    def test_change_instance_metadata_update(self):
        """A '+' diff for an existing key overwrites it in both backends."""
        diff = dict(b=['+', 4])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
                },
            })

    def test_change_instance_metadata_delete(self):
        """A '-' diff removes the key from both xenstore backends."""
        diff = dict(b=['-'])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
                },
            })

    def test_change_instance_metadata_not_found(self):
        """A vanished VM is tolerated: the VM lookup is attempted but no
        error propagates.
        """
        instance = {'uuid': 'not_found'}
        self.conn._vmops.change_instance_metadata(instance, "fake_diff")
        self.assertTrue(self.called_fake_get_vm_opaque_ref)
+
+
class XenAPISessionTestCase(test.NoDBTestCase):
    """Unit tests for XenAPISession: session creation, product version
    parsing and plugin version verification.

    These tests are mox-based, so the record/replay statement order is
    significant.
    """

    def _get_mock_xapisession(self, software_version):
        """Return a XenAPISession that skips the real __init__ (which
        would contact a hypervisor) and reports *software_version* as
        its host software version dict.
        """
        class MockXapiSession(xenapi_session.XenAPISession):
            def __init__(_ignore):
                "Skip the superclass's dirty init"

            def _get_software_version(_ignore):
                return software_version

        return MockXapiSession()

    def test_local_session(self):
        # A local (unix socket) connection must be created via
        # XenAPI.xapi_local().
        session = self._get_mock_xapisession({})
        session.is_local_connection = True
        session.XenAPI = self.mox.CreateMockAnything()
        session.XenAPI.xapi_local().AndReturn("local_connection")

        self.mox.ReplayAll()
        self.assertEqual("local_connection",
                         session._create_session("unix://local"))

    def test_remote_session(self):
        # A remote connection must be created via XenAPI.Session(url).
        session = self._get_mock_xapisession({})
        session.is_local_connection = False
        session.XenAPI = self.mox.CreateMockAnything()
        session.XenAPI.Session("url").AndReturn("remote_connection")

        self.mox.ReplayAll()
        self.assertEqual("remote_connection", session._create_session("url"))

    def test_get_product_version_product_brand_does_not_fail(self):
        # XCP-style version data carries no product_brand or
        # product_version: platform_version is parsed and brand is None.
        session = self._get_mock_xapisession({
            'build_number': '0',
            'date': '2012-08-03',
            'hostname': 'komainu',
            'linux': '3.2.0-27-generic',
            'network_backend': 'bridge',
            'platform_name': 'XCP_Kronos',
            'platform_version': '1.6.0',
            'xapi': '1.3',
            'xen': '4.1.2',
            'xencenter_max': '1.10',
            'xencenter_min': '1.10'
        })

        self.assertEqual(
            ((1, 6, 0), None),
            session._get_product_version_and_brand()
        )

    def test_get_product_version_product_brand_xs_6(self):
        # XenServer reports product_brand/product_version directly.
        session = self._get_mock_xapisession({
            'product_brand': 'XenServer',
            'product_version': '6.0.50',
            'platform_version': '0.0.1'
        })

        self.assertEqual(
            ((6, 0, 50), 'XenServer'),
            session._get_product_version_and_brand()
        )

    def test_verify_plugin_version_same(self):
        # Exact plugin version match passes verification.
        session = self._get_mock_xapisession({})

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.4")

        self.mox.ReplayAll()
        session._verify_plugin_version()

    def test_verify_plugin_version_compatible(self):
        # A newer minor version (2.5 >= required 2.4) is accepted.
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.5")

        self.mox.ReplayAll()
        session._verify_plugin_version()

    def test_verify_plugin_version_bad_maj(self):
        # A different major version (3.x vs required 2.4) must fail.
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("3.0")

        self.mox.ReplayAll()
        self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)

    def test_verify_plugin_version_bad_min(self):
        # An older minor version (2.3 < required 2.4) must fail.
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.3")

        self.mox.ReplayAll()
        self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)

    def test_verify_current_version_matches(self):
        # PLUGIN_REQUIRED_VERSION in the driver must stay in sync with
        # the PLUGIN_VERSION constant shipped in the in-tree xapi plugin.
        session = self._get_mock_xapisession({})

        # Import the plugin to extract its version
        path = os.path.dirname(__file__)
        rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
            "plugins/nova_plugin_version"
        for elem in rel_path_elem.split('/'):
            path = os.path.join(path, elem)
        path = os.path.realpath(path)

        plugin_version = None
        with open(path) as plugin_file:
            for line in plugin_file:
                if "PLUGIN_VERSION = " in line:
                    # Drop the 17-char 'PLUGIN_VERSION = ' prefix and
                    # the surrounding double quotes.
                    plugin_version = line.strip()[17:].strip('"')

        self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
                         plugin_version)
+
+
class XenAPIFakeTestCase(test.NoDBTestCase):
    """Sanity checks for the fake XenAPI record query matcher."""

    def test_query_matches(self):
        record = {'a': '1', 'b': '2', 'c_d': '3'}

        # (query, expected-match) pairs covering literals, negation,
        # and/or combinations, underscore escaping and quote styles.
        cases = [('field "a"="1"', True),
                 ('field "b"="2"', True),
                 ('field "b"="4"', False),
                 ('not field "b"="4"', True),
                 ('field "a"="1" and field "b"="4"', False),
                 ('field "a"="1" or field "b"="4"', True),
                 ('field "c__d"="3"', True),
                 ('field \'b\'=\'2\'', True)]

        for query, expected in cases:
            fail_msg = "for test '%s'" % query
            self.assertEqual(xenapi_fake._query_matches(record, query),
                             expected, fail_msg)

    def test_query_bad_format(self):
        record = {'a': '1', 'b': '2', 'c': '3'}

        # Malformed queries never match anything.
        for query in ('"a"="1" or "b"="4"', 'a=1'):
            fail_msg = "for test '%s'" % query
            self.assertFalse(xenapi_fake._query_matches(record, query),
                             fail_msg)
diff --git a/nova/tests/unit/virt/xenapi/vm_rrd.xml b/nova/tests/unit/virt/xenapi/vm_rrd.xml
new file mode 100644
index 0000000000..f9a7c8083e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/vm_rrd.xml
@@ -0,0 +1,1101 @@
+<rrd>
+ <version>0003</version>
+ <step>5</step>
+ <lastupdate>1328795567</lastupdate>
+ <ds>
+ <name>cpu0</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>1.0000</max>
+ <last_ds>5102.8417</last_ds>
+ <value>0.0110</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>Infinity</max>
+ <last_ds>4294967296</last_ds>
+ <value>10961792000.0000</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory_target</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>Infinity</max>
+ <last_ds>4294967296</last_ds>
+ <value>10961792000.0000</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_0_tx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1079132206</last_ds>
+ <value>752.4007</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_0_rx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1093250983</last_ds>
+ <value>4837.8805</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvda_write</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>4552440832</last_ds>
+ <value>0.0</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvda_read</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1371223040</last_ds>
+ <value>0.0</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory_internal_free</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1415564</last_ds>
+ <value>3612860.6020</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvdb_write</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvdb_read</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_2_tx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_2_rx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>1</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0259</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.6642</v>
+ <v>1968.1381</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>258.6530</v>
+ <v>1890.5522</v>
+ <v>565.3453</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.1120</v>
+ <v>1778.2501</v>
+ <v>817.5985</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0039</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.5131</v>
+ <v>1806.3336</v>
+ <v>9811.4443</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0041</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>264.3683</v>
+ <v>1952.4054</v>
+ <v>4370.4121</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0034</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>251.6331</v>
+ <v>1958.8002</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>274.5222</v>
+ <v>2067.5947</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0046</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>260.9790</v>
+ <v>2042.7045</v>
+ <v>1671.6940</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0163</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0992</v>
+ <v>1845.3728</v>
+ <v>4119.4312</v>
+ <v>0.0</v>
+ <v>1431698.1250</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0098</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>273.9898</v>
+ <v>1879.1331</v>
+ <v>5459.4102</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>261.3513</v>
+ <v>2335.3000</v>
+ <v>6837.4907</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0793</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.2620</v>
+ <v>2092.4504</v>
+ <v>2391.9744</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0406</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.0746</v>
+ <v>1859.9802</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>263.4259</v>
+ <v>2010.8950</v>
+ <v>550.1484</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0565</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>29891.2227</v>
+ <v>26210.6699</v>
+ <v>3213.4324</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0645</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>31501.1562</v>
+ <v>29642.1641</v>
+ <v>400.9566</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0381</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>17350.7676</v>
+ <v>20748.6133</v>
+ <v>1247.4755</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0212</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>11981.0918</v>
+ <v>12866.9775</v>
+ <v>5774.9497</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0045</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0901</v>
+ <v>1898.6758</v>
+ <v>4446.3750</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0614</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0959</v>
+ <v>2255.1912</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0609</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>253.1091</v>
+ <v>2099.0601</v>
+ <v>1230.0925</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0047</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>268.6620</v>
+ <v>1759.5667</v>
+ <v>2861.2107</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0100</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>292.2647</v>
+ <v>1828.5435</v>
+ <v>3270.3474</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0093</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>303.5810</v>
+ <v>1932.1176</v>
+ <v>4485.4355</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0038</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>291.6633</v>
+ <v>1842.4425</v>
+ <v>2898.5137</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>287.4134</v>
+ <v>1816.0144</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>12</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0150</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3221225472.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3221225472.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1181.3309</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2358.2158</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2080.5770</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1061673.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0130</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>261.6000</v>
+ <v>1990.6442</v>
+ <v>1432.2385</v>
+ <v>0.0</v>
+ <v>1441908.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0172</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>318.8885</v>
+ <v>1979.7030</v>
+ <v>1724.9528</v>
+ <v>0.0</v>
+ <v>1441912.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0483</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>3108.1233</v>
+ <v>4815.9639</v>
+ <v>4962.0503</v>
+ <v>68.2667</v>
+ <v>1441916.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0229</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>1944.2039</v>
+ <v>3757.9177</v>
+ <v>10861.6670</v>
+ <v>0.0</v>
+ <v>1439546.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0639</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>44504.8789</v>
+ <v>34745.1523</v>
+ <v>9571.1455</v>
+ <v>0.0</v>
+ <v>1437892.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.2945</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>79219.1641</v>
+ <v>102827.0781</v>
+ <v>438999.3438</v>
+ <v>0.0</v>
+ <v>1415337.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.1219</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>61093.7109</v>
+ <v>49836.3164</v>
+ <v>8734.3730</v>
+ <v>0.0</v>
+ <v>1399324.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0151</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>48.3914</v>
+ <v>1922.5935</v>
+ <v>2251.4346</v>
+ <v>0.0</v>
+ <v>1421237.1250</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.3162</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>80667.4922</v>
+ <v>53950.0430</v>
+ <v>416858.5000</v>
+ <v>0.0</v>
+ <v>1437032.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>720</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0848</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3775992081.0667</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3775992081.0667</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>16179.3166</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>13379.7997</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>109091.4636</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>323.1289</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1259057.5294</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.1458</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>6454.3096</v>
+ <v>5327.6709</v>
+ <v>116520.9609</v>
+ <v>738.4178</v>
+ <v>2653538.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0971</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>10180.4941</v>
+ <v>10825.1777</v>
+ <v>98749.3438</v>
+ <v>523.3778</v>
+ <v>2381725.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0683</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>23183.2695</v>
+ <v>19607.6523</v>
+ <v>93946.5703</v>
+ <v>807.8222</v>
+ <v>2143269.2500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0352</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>7552.5708</v>
+ <v>7320.5391</v>
+ <v>30907.9453</v>
+ <v>150384.6406</v>
+ <v>1583336.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>17280</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0187</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2483773622.0445</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2483773622.0445</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2648.2715</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3002.4238</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>19129.3156</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>6365.7244</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1468863.7753</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0579</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>6291.0151</v>
+ <v>7489.2583</v>
+ <v>70915.3750</v>
+ <v>50.1570</v>
+ <v>613674.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0541</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>10406.3682</v>
+ <v>10638.9365</v>
+ <v>32972.1250</v>
+ <v>7.6800</v>
+ <v>647683.5625</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0189</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>207.0768</v>
+ <v>2145.3167</v>
+ <v>1685.8905</v>
+ <v>0.0</v>
+ <v>599934.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0202</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>71.0270</v>
+ <v>2046.6521</v>
+ <v>6703.9795</v>
+ <v>182.0444</v>
+ <v>595963.8750</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0661</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>8520.3213</v>
+ <v>8488.0664</v>
+ <v>52978.7930</v>
+ <v>7.3956</v>
+ <v>727540.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0219</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>40443.0117</v>
+ <v>20702.5996</v>
+ <v>-1377536.8750</v>
+ <v>36990.5898</v>
+ <v>1823778.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0265</v>
+ <v>4294971904.0000</v>
+ <v>4294754304.0000</v>
+ <v>6384.6367</v>
+ <v>6513.4951</v>
+ <v>22415.6348</v>
+ <v>2486.9690</v>
+ <v>3072170.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+</rrd>