path: root/nova/tests/unit/virt/xenapi
author    Sean Dague <sean@dague.net>  2014-11-07 14:27:03 +0100
committer Sean Dague <sean@dague.net>  2014-11-12 15:31:08 -0500
commit    89cd6a0c493e26b5a9e017c99d731464292abbaf (patch)
tree      c2bf790d1684cd539b820247113492495123a163 /nova/tests/unit/virt/xenapi
parent    5c8bbaafef590e4d346a03051a0ba55c8be26c5c (diff)
download  nova-89cd6a0c493e26b5a9e017c99d731464292abbaf.tar.gz
move all tests to nova/tests/unit
As part of the split of functional and unit tests, we need to isolate the unit tests into a separate directory so that we can have multiple test targets in a sane way.

Part of bp:functional-tests-for-nova

Change-Id: Id42ba373c1bda6a312b673ab2b489ca56da8c628
Diffstat (limited to 'nova/tests/unit/virt/xenapi')
-rw-r--r--  nova/tests/unit/virt/xenapi/__init__.py                       0
-rw-r--r--  nova/tests/unit/virt/xenapi/client/__init__.py                0
-rw-r--r--  nova/tests/unit/virt/xenapi/client/test_objects.py          113
-rw-r--r--  nova/tests/unit/virt/xenapi/client/test_session.py          158
-rw-r--r--  nova/tests/unit/virt/xenapi/image/__init__.py                 0
-rw-r--r--  nova/tests/unit/virt/xenapi/image/test_bittorrent.py        163
-rw-r--r--  nova/tests/unit/virt/xenapi/image/test_glance.py            256
-rw-r--r--  nova/tests/unit/virt/xenapi/image/test_utils.py             252
-rw-r--r--  nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py   182
-rw-r--r--  nova/tests/unit/virt/xenapi/stubs.py                        365
-rw-r--r--  nova/tests/unit/virt/xenapi/test_agent.py                   468
-rw-r--r--  nova/tests/unit/virt/xenapi/test_driver.py                  101
-rw-r--r--  nova/tests/unit/virt/xenapi/test_network_utils.py            76
-rw-r--r--  nova/tests/unit/virt/xenapi/test_vm_utils.py               2422
-rw-r--r--  nova/tests/unit/virt/xenapi/test_vmops.py                  1124
-rw-r--r--  nova/tests/unit/virt/xenapi/test_volume_utils.py            232
-rw-r--r--  nova/tests/unit/virt/xenapi/test_volumeops.py               549
-rw-r--r--  nova/tests/unit/virt/xenapi/test_xenapi.py                 4105
-rw-r--r--  nova/tests/unit/virt/xenapi/vm_rrd.xml                     1101
19 files changed, 11667 insertions, 0 deletions
diff --git a/nova/tests/unit/virt/xenapi/__init__.py b/nova/tests/unit/virt/xenapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/__init__.py b/nova/tests/unit/virt/xenapi/client/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/test_objects.py b/nova/tests/unit/virt/xenapi/client/test_objects.py
new file mode 100644
index 0000000000..efaf17a9c7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_objects.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi.client import objects
+
+
+class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenAPISessionObjectTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.obj = objects.XenAPISessionObject(self.session, "FAKE")
+
+ def test_call_method_via_attr(self):
+ self.session.call_xenapi.return_value = "asdf"
+
+ result = self.obj.get_X("ref")
+
+ self.assertEqual(result, "asdf")
+ self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
+
+
+class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ObjectsTestCase, self).setUp()
+ self.session = mock.Mock()
+
+ def test_VM(self):
+ vm = objects.VM(self.session)
+ vm.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_SR(self):
+ sr = objects.SR(self.session)
+ sr.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_VDI(self):
+ vdi = objects.VDI(self.session)
+ vdi.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_VBD(self):
+ vbd = objects.VBD(self.session)
+ vbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_PBD(self):
+ pbd = objects.PBD(self.session)
+ pbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_PIF(self):
+ pif = objects.PIF(self.session)
+ pif.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_VLAN(self):
+ vlan = objects.VLAN(self.session)
+ vlan.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_host(self):
+ host = objects.Host(self.session)
+ host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_network(self):
+ network = objects.Network(self.session)
+ network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_pool(self):
+ pool = objects.Pool(self.session)
+ pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class VBDTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VBDTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.session.VBD = objects.VBD(self.session)
+
+ def test_plug(self):
+ self.session.VBD.plug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
+
+ def test_unplug(self):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.unplug",
+ "vbd_ref")
+
+ @mock.patch.object(utils, 'synchronized')
+ def test_vbd_plug_check_synchronized(self, mock_synchronized):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
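A note on the contract these tests pin down: attribute access on a
XenAPISessionObject is forwarded to the session as a single
call_xenapi("<XenAPI class>.<method>", ...) invocation, which is why
obj.get_X("ref") above asserts exactly one call to "FAKE.get_X". A minimal
sketch of that delegation pattern (an illustration inferred from the
assertions, not nova's implementation):

    class SessionObjectSketch(object):
        """Forward unknown attributes to session.call_xenapi as 'Name.attr'."""

        def __init__(self, session, name):
            self.session = session
            self.name = name

        def __getattr__(self, attr):
            def caller(*args):
                # e.g. vm.get_X("ref") -> call_xenapi("VM.get_X", "ref")
                return self.session.call_xenapi(
                    "%s.%s" % (self.name, attr), *args)
            return caller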
diff --git a/nova/tests/unit/virt/xenapi/client/test_session.py b/nova/tests/unit/virt/xenapi/client/test_session.py
new file mode 100644
index 0000000000..1fbbbf752d
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_session.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import socket
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova import version
+from nova.virt.xenapi.client import session
+
+
+class SessionTestCase(stubs.XenAPITestBaseNoDB):
+ @mock.patch.object(session.XenAPISession, '_create_session')
+ @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
+ @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
+ def test_session_passes_version(self, mock_verify, mock_version,
+ create_session):
+ sess = mock.Mock()
+ create_session.return_value = sess
+ mock_version.return_value = ('version', 'brand')
+
+ session.XenAPISession('url', 'username', 'password')
+
+ expected_version = '%s %s %s' % (version.vendor_string(),
+ version.product_string(),
+ version.version_string_with_package())
+ sess.login_with_password.assert_called_with('username', 'password',
+ expected_version,
+ 'OpenStack')
+
+
+class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ApplySessionHelpersTestCase, self).setUp()
+ self.session = mock.Mock()
+ session.apply_session_helpers(self.session)
+
+ def test_apply_session_helpers_add_VM(self):
+ self.session.VM.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_apply_session_helpers_add_SR(self):
+ self.session.SR.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_apply_session_helpers_add_VDI(self):
+ self.session.VDI.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_apply_session_helpers_add_VBD(self):
+ self.session.VBD.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_apply_session_helpers_add_PBD(self):
+ self.session.PBD.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_apply_session_helpers_add_PIF(self):
+ self.session.PIF.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_apply_session_helpers_add_VLAN(self):
+ self.session.VLAN.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_apply_session_helpers_add_host(self):
+ self.session.host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_apply_session_helpers_add_network(self):
+ self.session.network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_apply_session_helpers_add_pool(self):
+ self.session.pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
+ def _get_fake_xapisession(self):
+ class FakeXapiSession(session.XenAPISession):
+ def __init__(self, **kwargs):
+ "Skip the superclass's dirty init"
+ self.XenAPI = mock.MagicMock()
+
+ return FakeXapiSession()
+
+ def setUp(self):
+ super(CallPluginTestCase, self).setUp()
+ self.session = self._get_fake_xapisession()
+
+ def test_serialized_with_retry_socket_error_conn_reset(self):
+ exc = socket.error
+ exc.errno = errno.ECONNRESET
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(exception.PluginRetriesExceeded,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_with(plugin, fn)
+ self.assertEqual(2, call_plugin_serialized.call_count)
+ self.assertEqual(2, retry_cb.call_count)
+
+ def test_serialized_with_retry_socket_error_reraised(self):
+ exc = socket.error
+ exc.errno = errno.ECONNREFUSED
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(socket.error,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_once_with(plugin, fn)
+ self.assertEqual(0, retry_cb.call_count)
+
+ def test_serialized_with_retry_socket_reset_reraised(self):
+ exc = socket.error
+ exc.errno = errno.ECONNRESET
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(exception.PluginRetriesExceeded,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_with(plugin, fn)
+ self.assertEqual(2, call_plugin_serialized.call_count)
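The three retry tests above encode the policy under test: ECONNRESET is
retried until num_retries + 1 attempts are exhausted and then reported as
PluginRetriesExceeded, while any other socket error (e.g. ECONNREFUSED) is
re-raised immediately, before the retry callback ever fires. A hedged sketch
of a loop with those semantics (call_with_retry and its signature are
illustrative, not nova's API):

    import errno
    import socket

    def call_with_retry(call, num_retries, retry_cb=None):
        """Retry `call` on ECONNRESET; other socket errors propagate."""
        attempts = num_retries + 1
        for _attempt in range(attempts):
            try:
                return call()
            except socket.error as exc:
                if exc.errno != errno.ECONNRESET:
                    raise  # e.g. ECONNREFUSED escapes on the first attempt
                if retry_cb is not None:
                    retry_cb(exc=exc)  # one callback per caught reset
        # nova raises exception.PluginRetriesExceeded here
        raise RuntimeError("%d attempts exhausted" % attempts)

With num_retries=1 and a persistent reset this makes two calls and invokes
retry_cb twice, matching the call counts asserted above.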
diff --git a/nova/tests/unit/virt/xenapi/image/__init__.py b/nova/tests/unit/virt/xenapi/image/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/image/test_bittorrent.py b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
new file mode 100644
index 0000000000..5422036b98
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
@@ -0,0 +1,163 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import pkg_resources
+import six
+
+from nova import context
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import bittorrent
+from nova.virt.xenapi import vm_utils
+
+
+class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(TestBittorrentStore, self).setUp()
+ self.store = bittorrent.BittorrentStore()
+ self.mox = mox.Mox()
+
+ self.flags(torrent_base_url='http://foo',
+ connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.context = context.RequestContext(
+ 'user', 'project', auth_token='foobar')
+
+ fake.reset()
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+
+ def mock_iter_eps(namespace):
+ return []
+
+ self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_eps)
+
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+
+ self.stubs.Set(
+ vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
+
+ def test_download_image(self):
+
+ instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
+ params = {'image_id': 'fake_image_uuid',
+ 'sr_path': '/fake/sr/path',
+ 'torrent_download_stall_cutoff': 600,
+ 'torrent_listen_port_end': 6891,
+ 'torrent_listen_port_start': 6881,
+ 'torrent_max_last_accessed': 86400,
+ 'torrent_max_seeder_processes_per_host': 1,
+ 'torrent_seed_chance': 1.0,
+ 'torrent_seed_duration': 3600,
+ 'torrent_url': 'http://foo/fake_image_uuid.torrent',
+ 'uuid_stack': ['uuid1']}
+
+ self.stubs.Set(vm_utils, '_make_uuid_stack',
+ lambda *a, **kw: ['uuid1'])
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized(
+ 'bittorrent', 'download_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.download_image(self.context, self.session,
+ instance, 'fake_image_uuid')
+
+ self.mox.VerifyAll()
+
+ def test_upload_image(self):
+ self.assertRaises(NotImplementedError, self.store.upload_image,
+ self.context, self.session, mox.IgnoreArg(), 'fake_image_uuid',
+ ['fake_vdi_uuid'])
+
+
+def bad_fetcher(image_id):
+ raise test.TestingException("just plain bad.")
+
+
+def another_fetcher(image_id):
+ return "http://www.foobar.com/%s" % image_id
+
+
+class MockEntryPoint(object):
+ name = "torrent_url"
+
+ def load(self):
+ return another_fetcher
+
+
+class LookupTorrentURLTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LookupTorrentURLTestCase, self).setUp()
+ self.store = bittorrent.BittorrentStore()
+ self.image_id = 'fakeimageid'
+
+ def _mock_iter_none(self, namespace):
+ return []
+
+ def _mock_iter_single(self, namespace):
+ return [MockEntryPoint()]
+
+ def test_default_fetch_url_no_base_url_set(self):
+ self.flags(torrent_base_url=None,
+ group='xenserver')
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_none)
+
+ exc = self.assertRaises(
+ RuntimeError, self.store._lookup_torrent_url_fn)
+ self.assertEqual(_('Cannot create default bittorrent URL without'
+ ' torrent_base_url set'
+ ' or torrent URL fetcher extension'),
+ six.text_type(exc))
+
+ def test_default_fetch_url_base_url_is_set(self):
+ self.flags(torrent_base_url='http://foo',
+ group='xenserver')
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_single)
+
+ lookup_fn = self.store._lookup_torrent_url_fn()
+ self.assertEqual('http://foo/fakeimageid.torrent',
+ lookup_fn(self.image_id))
+
+ def test_with_extension(self):
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_single)
+
+ lookup_fn = self.store._lookup_torrent_url_fn()
+ self.assertEqual("http://www.foobar.com/%s" % self.image_id,
+ lookup_fn(self.image_id))
+
+ def test_multiple_extensions_found(self):
+ self.flags(torrent_base_url=None,
+ group='xenserver')
+
+ def mock_iter_multiple(namespace):
+ return [MockEntryPoint(), MockEntryPoint()]
+
+ self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_multiple)
+
+ exc = self.assertRaises(
+ RuntimeError, self.store._lookup_torrent_url_fn)
+ self.assertEqual(_('Multiple torrent URL fetcher extensions found.'
+ ' Failing.'),
+ six.text_type(exc))
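LookupTorrentURLTestCase documents the resolution order for torrent URLs:
a configured torrent_base_url wins and yields the default
"<base>/<image_id>.torrent" fetcher; otherwise exactly one "torrent_url"
entry point may supply the fetcher, and zero or multiple extensions are both
errors. A compact sketch of that decision logic (the entry-point namespace
here is a placeholder, not nova's real group name):

    import pkg_resources

    def lookup_torrent_url_fn(base_url, namespace='torrent_url.fetchers'):
        if base_url:
            # default fetcher derived from torrent_base_url
            return lambda image_id: '%s/%s.torrent' % (base_url, image_id)
        matches = list(pkg_resources.iter_entry_points(namespace))
        if len(matches) > 1:
            raise RuntimeError('Multiple torrent URL fetcher extensions'
                               ' found. Failing.')
        if matches:
            return matches[0].load()
        raise RuntimeError('Cannot create default bittorrent URL without'
                           ' torrent_base_url set or torrent URL fetcher'
                           ' extension')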
diff --git a/nova/tests/unit/virt/xenapi/image/test_glance.py b/nova/tests/unit/virt/xenapi/image/test_glance.py
new file mode 100644
index 0000000000..8fbb853efa
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_glance.py
@@ -0,0 +1,256 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+import time
+
+import mock
+from mox3 import mox
+
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import vm_utils
+
+
+class TestGlanceStore(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(TestGlanceStore, self).setUp()
+ self.store = glance.GlanceStore()
+
+ self.flags(host='1.1.1.1',
+ port=123,
+ api_insecure=False, group='glance')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.context = context.RequestContext(
+ 'user', 'project', auth_token='foobar')
+
+ fake.reset()
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+
+ self.stubs.Set(
+ vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
+
+ self.instance = {'uuid': 'blah',
+ 'system_metadata': [],
+ 'auto_disk_config': True,
+ 'os_type': 'default',
+ 'xenapi_use_agent': 'true'}
+
+ def _get_params(self):
+ return {'image_id': 'fake_image_uuid',
+ 'glance_host': '1.1.1.1',
+ 'glance_port': 123,
+ 'glance_use_ssl': False,
+ 'sr_path': '/fake/sr/path',
+ 'extra_headers': {'X-Service-Catalog': '[]',
+ 'X-Auth-Token': 'foobar',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'project',
+ 'X-User-Id': 'user',
+ 'X-Identity-Status': 'Confirmed'}}
+
+ def _get_download_params(self):
+ params = self._get_params()
+ params['uuid_stack'] = ['uuid1']
+ return params
+
+ def test_download_image(self):
+ params = self._get_download_params()
+
+ self.stubs.Set(vm_utils, '_make_uuid_stack',
+ lambda *a, **kw: ['uuid1'])
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'download_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.download_image(self.context, self.session,
+ self.instance, 'fake_image_uuid')
+
+ self.mox.VerifyAll()
+
+ @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
+ @mock.patch.object(random, 'shuffle')
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
+ 'debug')
+ def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep,
+ mock_shuffle, mock_make_uuid_stack):
+ params = self._get_download_params()
+ self.flags(num_retries=2, group='glance')
+
+ params.pop("glance_port")
+ params.pop("glance_host")
+ calls = [mock.call('glance', 'download_vhd', glance_port=9292,
+ glance_host='10.0.1.1', **params),
+ mock.call('glance', 'download_vhd', glance_port=9293,
+ glance_host='10.0.0.1', **params)]
+ log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
+ 'attempts': 3, 'attempt': 1,
+ 'fn': 'download_vhd',
+ 'plugin': 'glance'}),
+ mock.call(mock.ANY, {'callback_result': '10.0.0.1',
+ 'attempts': 3, 'attempt': 2,
+ 'fn': 'download_vhd',
+ 'plugin': 'glance'})]
+
+ glance_api_servers = ['10.0.1.1:9292',
+ 'http://10.0.0.1:9293']
+ self.flags(api_servers=glance_api_servers, group='glance')
+
+ with (mock.patch.object(self.session, 'call_plugin_serialized')
+ ) as mock_call_plugin_serialized:
+ error_details = ["", "", "RetryableError", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ mock_call_plugin_serialized.side_effect = [error, "success"]
+
+ self.store.download_image(self.context, self.session,
+ self.instance, 'fake_image_uuid')
+
+ mock_call_plugin_serialized.assert_has_calls(calls)
+ mock_log_debug.assert_has_calls(log_calls, any_order=True)
+
+ self.assertEqual(1, mock_fault.call_count)
+
+ def _get_upload_params(self, auto_disk_config=True,
+ expected_os_type='default'):
+ params = self._get_params()
+ params['vdi_uuids'] = ['fake_vdi_uuid']
+ params['properties'] = {'auto_disk_config': auto_disk_config,
+ 'os_type': expected_os_type}
+ return params
+
+ def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
+ params = self._get_upload_params(auto_disk_config, expected_os_type)
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
+
+ self.mox.ReplayAll()
+ self.store.upload_image(self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image(self):
+ self._test_upload_image(True)
+
+ def test_upload_image_None_os_type(self):
+ self.instance['os_type'] = None
+ self._test_upload_image(True, 'linux')
+
+ def test_upload_image_no_os_type(self):
+ del self.instance['os_type']
+ self._test_upload_image(True, 'linux')
+
+ def test_upload_image_auto_config_disk_disabled(self):
+ sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
+ self.instance["system_metadata"] = sys_meta
+ self._test_upload_image("disabled")
+
+ def test_upload_image_raises_exception(self):
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(RuntimeError)
+ self.mox.ReplayAll()
+
+ self.assertRaises(RuntimeError, self.store.upload_image,
+ self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image_retries_then_raises_exception(self):
+ self.flags(num_retries=2, group='glance')
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.mox.StubOutWithMock(time, 'sleep')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ error_details = ["", "", "RetryableError", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(0.5)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(1)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.CouldNotUploadImage,
+ self.store.upload_image,
+ self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image_retries_on_signal_exception(self):
+ self.flags(num_retries=2, group='glance')
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.mox.StubOutWithMock(time, 'sleep')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ error_details = ["", "task signaled", "", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(0.5)
+ # Note(johngarbutt) XenServer 6.1 and later has this error
+ error_details = ["", "signal: SIGTERM", "", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(1)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params)
+ self.mox.ReplayAll()
+
+ self.store.upload_image(self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
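test_download_image_retry also fixes how glance api_servers entries are
normalized: '10.0.1.1:9292' and 'http://10.0.0.1:9293' both become a
(host, port, use_ssl) triple, the endpoint list is shuffled once, and each
retry attempt consumes the next endpoint. A hedged sketch of that parsing
(parse_api_server is illustrative, not nova's helper):

    import itertools
    import random

    def parse_api_server(endpoint):
        """'10.0.1.1:9292' or 'https://h:p' -> (host, port, use_ssl)."""
        if '//' not in endpoint:
            endpoint = 'http://' + endpoint   # scheme-less entries: no SSL
        scheme, rest = endpoint.split('://', 1)
        host, port = rest.split(':', 1)
        return host, int(port), scheme == 'https'

    # usage sketch: shuffle once, then hand out one endpoint per attempt
    servers = ['10.0.1.1:9292', 'http://10.0.0.1:9293']
    random.shuffle(servers)
    endpoints = itertools.cycle(parse_api_server(s) for s in servers)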
diff --git a/nova/tests/unit/virt/xenapi/image/test_utils.py b/nova/tests/unit/virt/xenapi/image/test_utils.py
new file mode 100644
index 0000000000..4763f66683
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_utils.py
@@ -0,0 +1,252 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tarfile
+
+import mock
+
+from nova import test
+from nova.virt.xenapi.image import utils
+
+
+@mock.patch.object(utils, 'IMAGE_API')
+class GlanceImageTestCase(test.NoDBTestCase):
+
+ def _get_image(self):
+ return utils.GlanceImage(mock.sentinel.context,
+ mock.sentinel.image_ref)
+
+ def test_meta(self, mocked):
+ mocked.get.return_value = mock.sentinel.meta
+
+ image = self._get_image()
+ self.assertEqual(mock.sentinel.meta, image.meta)
+ mocked.get.assert_called_once_with(mock.sentinel.context,
+ mock.sentinel.image_ref)
+
+ def test_download_to(self, mocked):
+ mocked.download.return_value = None
+
+ image = self._get_image()
+ result = image.download_to(mock.sentinel.fobj)
+ self.assertIsNone(result)
+ mocked.download.assert_called_once_with(mock.sentinel.context,
+ mock.sentinel.image_ref,
+ mock.sentinel.fobj)
+
+ def test_is_raw_tgz_empty_meta(self, mocked):
+ mocked.get.return_value = {}
+
+ image = self._get_image()
+ self.assertEqual(False, image.is_raw_tgz())
+
+ def test_is_raw_tgz_for_raw_tgz(self, mocked):
+ mocked.get.return_value = {
+ 'disk_format': 'raw',
+ 'container_format': 'tgz'
+ }
+
+ image = self._get_image()
+ self.assertEqual(True, image.is_raw_tgz())
+
+ def test_data(self, mocked):
+ mocked.download.return_value = mock.sentinel.image
+ image = self._get_image()
+
+ self.assertEqual(mock.sentinel.image, image.data())
+
+
+class RawImageTestCase(test.NoDBTestCase):
+ def test_get_size(self):
+ glance_image = self.mox.CreateMock(utils.GlanceImage)
+ glance_image.meta = {'size': '123'}
+ raw_image = utils.RawImage(glance_image)
+ self.mox.ReplayAll()
+
+ self.assertEqual(123, raw_image.get_size())
+
+ def test_stream_to(self):
+ glance_image = self.mox.CreateMock(utils.GlanceImage)
+ glance_image.download_to('file').AndReturn('result')
+ raw_image = utils.RawImage(glance_image)
+ self.mox.ReplayAll()
+
+ self.assertEqual('result', raw_image.stream_to('file'))
+
+
+class TestIterableBasedFile(test.NoDBTestCase):
+ def test_constructor(self):
+ class FakeIterable(object):
+ def __iter__(_self):
+ return 'iterator'
+
+ the_file = utils.IterableToFileAdapter(FakeIterable())
+
+ self.assertEqual('iterator', the_file.iterator)
+
+ def test_read_one_character(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ self.assertEqual('c', the_file.read(1))
+
+ def test_read_stores_remaining_characters(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ the_file.read(1)
+
+ self.assertEqual('hunk1', the_file.remaining_data)
+
+ def test_read_remaining_characters(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ self.assertEqual('c', the_file.read(1))
+ self.assertEqual('h', the_file.read(1))
+
+ def test_read_reached_end_of_file(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ self.assertEqual('chunk1', the_file.read(100))
+ self.assertEqual('chunk2', the_file.read(100))
+ self.assertEqual('', the_file.read(100))
+
+ def test_empty_chunks(self):
+ the_file = utils.IterableToFileAdapter([
+ '', '', 'chunk2'
+ ])
+
+ self.assertEqual('chunk2', the_file.read(100))
+
+
+class RawTGZTestCase(test.NoDBTestCase):
+ def test_as_tarfile(self):
+ image = utils.RawTGZImage(None)
+ self.mox.StubOutWithMock(image, '_as_file')
+ self.mox.StubOutWithMock(utils.tarfile, 'open')
+
+ image._as_file().AndReturn('the_file')
+ utils.tarfile.open(mode='r|gz', fileobj='the_file').AndReturn('tf')
+
+ self.mox.ReplayAll()
+
+ result = image._as_tarfile()
+ self.assertEqual('tf', result)
+
+ def test_as_file(self):
+ self.mox.StubOutWithMock(utils, 'IterableToFileAdapter')
+ glance_image = self.mox.CreateMock(utils.GlanceImage)
+ image = utils.RawTGZImage(glance_image)
+ glance_image.data().AndReturn('iterable-data')
+ utils.IterableToFileAdapter('iterable-data').AndReturn('data-as-file')
+
+ self.mox.ReplayAll()
+
+ result = image._as_file()
+
+ self.assertEqual('data-as-file', result)
+
+ def test_get_size(self):
+ tar_file = self.mox.CreateMock(tarfile.TarFile)
+ tar_info = self.mox.CreateMock(tarfile.TarInfo)
+
+ image = utils.RawTGZImage(None)
+
+ self.mox.StubOutWithMock(image, '_as_tarfile')
+
+ image._as_tarfile().AndReturn(tar_file)
+ tar_file.next().AndReturn(tar_info)
+ tar_info.size = 124
+
+ self.mox.ReplayAll()
+
+ result = image.get_size()
+
+ self.assertEqual(124, result)
+ self.assertEqual(image._tar_info, tar_info)
+ self.assertEqual(image._tar_file, tar_file)
+
+ def test_get_size_called_twice(self):
+ tar_file = self.mox.CreateMock(tarfile.TarFile)
+ tar_info = self.mox.CreateMock(tarfile.TarInfo)
+
+ image = utils.RawTGZImage(None)
+
+ self.mox.StubOutWithMock(image, '_as_tarfile')
+
+ image._as_tarfile().AndReturn(tar_file)
+ tar_file.next().AndReturn(tar_info)
+ tar_info.size = 124
+
+ self.mox.ReplayAll()
+
+ image.get_size()
+ result = image.get_size()
+
+ self.assertEqual(124, result)
+ self.assertEqual(image._tar_info, tar_info)
+ self.assertEqual(image._tar_file, tar_file)
+
+ def test_stream_to_without_size_retrieved(self):
+ source_tar = self.mox.CreateMock(tarfile.TarFile)
+ first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
+ target_file = self.mox.CreateMock(file)
+ source_file = self.mox.CreateMock(file)
+
+ image = utils.RawTGZImage(None)
+ image._image_service_and_image_id = ('service', 'id')
+
+ self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
+ self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')
+
+ image._as_tarfile().AndReturn(source_tar)
+ source_tar.next().AndReturn(first_tarinfo)
+ source_tar.extractfile(first_tarinfo).AndReturn(source_file)
+ utils.shutil.copyfileobj(source_file, target_file)
+ source_tar.close()
+
+ self.mox.ReplayAll()
+
+ image.stream_to(target_file)
+
+ def test_stream_to_with_size_retrieved(self):
+ source_tar = self.mox.CreateMock(tarfile.TarFile)
+ first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
+ target_file = self.mox.CreateMock(file)
+ source_file = self.mox.CreateMock(file)
+ first_tarinfo.size = 124
+
+ image = utils.RawTGZImage(None)
+ image._image_service_and_image_id = ('service', 'id')
+
+ self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
+ self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')
+
+ image._as_tarfile().AndReturn(source_tar)
+ source_tar.next().AndReturn(first_tarinfo)
+ source_tar.extractfile(first_tarinfo).AndReturn(source_file)
+ utils.shutil.copyfileobj(source_file, target_file)
+ source_tar.close()
+
+ self.mox.ReplayAll()
+
+ image.get_size()
+ image.stream_to(target_file)
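TestIterableBasedFile pins down the adapter's whole contract: read(n) serves
bytes from the current chunk, stashes the leftover in remaining_data, skips
empty chunks, and returns '' once the iterable is exhausted. A sketch that
satisfies those assertions (an illustration, not nova's code):

    class IterableToFileAdapterSketch(object):
        """Wrap an iterable of string chunks in a file-like read()."""

        def __init__(self, iterable):
            self.iterator = iterable.__iter__()
            self.remaining_data = ''

        def read(self, size):
            chunk = self.remaining_data
            while not chunk:            # skip empty chunks
                try:
                    chunk = next(self.iterator)
                except StopIteration:
                    return ''           # end of "file"
            self.remaining_data = chunk[size:]
            return chunk[:size]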
diff --git a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
new file mode 100644
index 0000000000..4a86ce5371
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
@@ -0,0 +1,182 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import tarfile
+
+import eventlet
+
+from nova.image import glance
+from nova import test
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi.image import vdi_through_dev
+
+
+@contextlib.contextmanager
+def fake_context(result=None):
+ yield result
+
+
+class TestDelegatingToCommand(test.NoDBTestCase):
+ def test_upload_image_is_delegated_to_command(self):
+ command = self.mox.CreateMock(vdi_through_dev.UploadToGlanceAsRawTgz)
+ self.mox.StubOutWithMock(vdi_through_dev, 'UploadToGlanceAsRawTgz')
+ vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'ctx', 'session', 'instance', 'image_id', 'vdis').AndReturn(
+ command)
+ command.upload_image().AndReturn('result')
+ self.mox.ReplayAll()
+
+ store = vdi_through_dev.VdiThroughDevStore()
+ result = store.upload_image(
+ 'ctx', 'session', 'instance', 'image_id', 'vdis')
+
+ self.assertEqual('result', result)
+
+
+class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
+ def test_upload_image(self):
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(store, '_perform_upload')
+ self.mox.StubOutWithMock(store, '_get_vdi_ref')
+ self.mox.StubOutWithMock(vdi_through_dev, 'glance')
+ self.mox.StubOutWithMock(vdi_through_dev, 'vm_utils')
+ self.mox.StubOutWithMock(vdi_through_dev, 'utils')
+
+ store._get_vdi_ref().AndReturn('vdi_ref')
+ vdi_through_dev.vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=True).AndReturn(
+ fake_context('dev'))
+ vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath')
+ vdi_through_dev.utils.temporary_chown('devpath').AndReturn(
+ fake_context())
+ store._perform_upload('devpath')
+
+ self.mox.ReplayAll()
+
+ store.upload_image()
+
+ def test__perform_upload(self):
+ producer = self.mox.CreateMock(vdi_through_dev.TarGzProducer)
+ consumer = self.mox.CreateMock(glance.UpdateGlanceImage)
+ pool = self.mox.CreateMock(eventlet.GreenPool)
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(store, '_create_pipe')
+ self.mox.StubOutWithMock(store, '_get_virtual_size')
+ self.mox.StubOutWithMock(producer, 'get_metadata')
+ self.mox.StubOutWithMock(vdi_through_dev, 'TarGzProducer')
+ self.mox.StubOutWithMock(glance, 'UpdateGlanceImage')
+ self.mox.StubOutWithMock(vdi_through_dev, 'eventlet')
+
+ producer.get_metadata().AndReturn('metadata')
+ store._get_virtual_size().AndReturn('324')
+ store._create_pipe().AndReturn(('readfile', 'writefile'))
+ vdi_through_dev.TarGzProducer(
+ 'devpath', 'writefile', '324', 'disk.raw').AndReturn(
+ producer)
+ glance.UpdateGlanceImage('context', 'id', 'metadata',
+ 'readfile').AndReturn(consumer)
+ vdi_through_dev.eventlet.GreenPool().AndReturn(pool)
+ pool.spawn(producer.start)
+ pool.spawn(consumer.start)
+ pool.waitall()
+
+ self.mox.ReplayAll()
+
+ store._perform_upload('devpath')
+
+ def test__get_vdi_ref(self):
+ session = self.mox.CreateMock(xenapi_session.XenAPISession)
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
+ session.call_xenapi('VDI.get_by_uuid', 'vdi0').AndReturn('vdi_ref')
+
+ self.mox.ReplayAll()
+
+ self.assertEqual('vdi_ref', store._get_vdi_ref())
+
+ def test__get_virtual_size(self):
+ session = self.mox.CreateMock(xenapi_session.XenAPISession)
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(store, '_get_vdi_ref')
+ store._get_vdi_ref().AndReturn('vdi_ref')
+ session.call_xenapi('VDI.get_virtual_size', 'vdi_ref')
+
+ self.mox.ReplayAll()
+
+ store._get_virtual_size()
+
+ def test__create_pipe(self):
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(vdi_through_dev, 'os')
+ self.mox.StubOutWithMock(vdi_through_dev, 'greenio')
+ vdi_through_dev.os.pipe().AndReturn(('rpipe', 'wpipe'))
+ vdi_through_dev.greenio.GreenPipe('rpipe', 'rb', 0).AndReturn('rfile')
+ vdi_through_dev.greenio.GreenPipe('wpipe', 'wb', 0).AndReturn('wfile')
+
+ self.mox.ReplayAll()
+
+ result = store._create_pipe()
+ self.assertEqual(('rfile', 'wfile'), result)
+
+
+class TestTarGzProducer(test.NoDBTestCase):
+ def test_constructor(self):
+ producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
+ '100', 'fname')
+
+ self.assertEqual('devpath', producer.fpath)
+ self.assertEqual('writefile', producer.output)
+ self.assertEqual('100', producer.size)
+ self.assertEqual('fname', producer.fname)
+
+ def test_start(self):
+ outf = self.mox.CreateMock(file)
+ producer = vdi_through_dev.TarGzProducer('fpath', outf,
+ '100', 'fname')
+
+ tfile = self.mox.CreateMock(tarfile.TarFile)
+ tinfo = self.mox.CreateMock(tarfile.TarInfo)
+
+ inf = self.mox.CreateMock(file)
+
+ self.mox.StubOutWithMock(vdi_through_dev, 'tarfile')
+ self.mox.StubOutWithMock(producer, '_open_file')
+
+ vdi_through_dev.tarfile.TarInfo(name='fname').AndReturn(tinfo)
+ vdi_through_dev.tarfile.open(fileobj=outf, mode='w|gz').AndReturn(
+ fake_context(tfile))
+ producer._open_file('fpath', 'rb').AndReturn(fake_context(inf))
+ tfile.addfile(tinfo, fileobj=inf)
+ outf.close()
+
+ self.mox.ReplayAll()
+
+ producer.start()
+
+ self.assertEqual(100, tinfo.size)
+
+ def test_get_metadata(self):
+ producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
+ '100', 'fname')
+
+ self.assertEqual({
+ 'disk_format': 'raw',
+ 'container_format': 'tgz'},
+ producer.get_metadata())
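test__perform_upload lays out the plumbing being verified: an OS pipe couples
a TarGzProducer (which tars the raw device into the write end) with an
UpdateGlanceImage consumer (which streams the read end to glance), both
spawned on a green pool that is then drained. A schematic of that wiring (the
factory arguments are hypothetical; nova wraps the pipe fds in
greenio.GreenPipe rather than os.fdopen):

    import os

    def perform_upload_sketch(producer_factory, consumer_factory, pool):
        """Couple producer and consumer through a pipe; wait for both."""
        read_fd, write_fd = os.pipe()
        readfile = os.fdopen(read_fd, 'rb', 0)    # unbuffered read end
        writefile = os.fdopen(write_fd, 'wb', 0)  # unbuffered write end
        producer = producer_factory(writefile)    # writes the tarball
        consumer = consumer_factory(readfile)     # uploads the stream
        pool.spawn(producer.start)
        pool.spawn(consumer.start)
        pool.waitall()                            # block until both finish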
diff --git a/nova/tests/unit/virt/xenapi/stubs.py b/nova/tests/unit/virt/xenapi/stubs.py
new file mode 100644
index 0000000000..ad13ca41df
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/stubs.py
@@ -0,0 +1,365 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite."""
+
+import pickle
+import random
+
+from oslo.serialization import jsonutils
+
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.xenapi.client import session
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+
+
+def stubout_firewall_driver(stubs, conn):
+
+ def fake_none(self, *args):
+ return
+
+ _vmops = conn._vmops
+ stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
+ stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
+
+
+def stubout_instance_snapshot(stubs):
+ def fake_fetch_image(context, session, instance, name_label, image, type):
+ return {'root': dict(uuid=_make_fake_vdi(), file=None),
+ 'kernel': dict(uuid=_make_fake_vdi(), file=None),
+ 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
+
+ stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
+
+ def fake_wait_for_vhd_coalesce(*args):
+ # TODO(sirp): Should we actually fake out the data here?
+ return "fakeparent", "fakebase"
+
+ stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
+
+
+def stubout_session(stubs, cls, product_version=(5, 6, 2),
+ product_brand='XenServer', **opt_args):
+ """Stubs out methods from XenAPISession."""
+ stubs.Set(session.XenAPISession, '_create_session',
+ lambda s, url: cls(url, **opt_args))
+ stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
+ lambda s: (product_version, product_brand))
+
+
+def stubout_get_this_vm_uuid(stubs):
+ def f(session):
+ vms = [rec['uuid'] for ref, rec
+ in fake.get_all_records('VM').iteritems()
+ if rec['is_control_domain']]
+ return vms[0]
+ stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
+def stubout_image_service_download(stubs):
+ def fake_download(*args, **kwargs):
+ pass
+ stubs.Set(nova.tests.unit.image.fake._FakeImageService,
+ 'download', fake_download)
+
+
+def stubout_stream_disk(stubs):
+ def fake_stream_disk(*args, **kwargs):
+ pass
+ stubs.Set(vm_utils, '_stream_disk', fake_stream_disk)
+
+
+def stubout_determine_is_pv_objectstore(stubs):
+ """Assumes VMs stu have PV kernels."""
+
+ def f(*args):
+ return False
+ stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
+
+
+def stubout_is_snapshot(stubs):
+ """Always returns true
+
+ xenapi fake driver does not create vmrefs for snapshots.
+ """
+
+ def f(*args):
+ return True
+ stubs.Set(vm_utils, 'is_snapshot', f)
+
+
+def stubout_lookup_image(stubs):
+ """Simulates a failure in lookup image."""
+ def f(_1, _2, _3, _4):
+ raise Exception("Test Exception raised by fake lookup_image")
+ stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_disk_image(stubs, raise_failure=False):
+ """Simulates a failure in fetch image_glance_disk."""
+
+ def _fake_fetch_disk_image(context, session, instance, name_label, image,
+ image_type):
+ if raise_failure:
+ raise fake.Failure("Test Exception raised by "
+ "fake fetch_image_glance_disk")
+ elif image_type == vm_utils.ImageType.KERNEL:
+ filename = "kernel"
+ elif image_type == vm_utils.ImageType.RAMDISK:
+ filename = "ramdisk"
+ else:
+ filename = "unknown"
+
+ vdi_type = vm_utils.ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
+
+ stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
+
+
+def stubout_create_vm(stubs):
+ """Simulates a failure in create_vm."""
+
+ def f(*args):
+ raise fake.Failure("Test Exception raised by fake create_vm")
+ stubs.Set(vm_utils, 'create_vm', f)
+
+
+def stubout_attach_disks(stubs):
+ """Simulates a failure in _attach_disks."""
+
+ def f(*args):
+ raise fake.Failure("Test Exception raised by fake _attach_disks")
+ stubs.Set(vmops.VMOps, '_attach_disks', f)
+
+
+def _make_fake_vdi():
+ sr_ref = fake.get_all('SR')[0]
+ vdi_ref = fake.create_vdi('', sr_ref)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ return vdi_rec['uuid']
+
+
+class FakeSessionForVMTests(fake.SessionBase):
+ """Stubs out a XenAPISession for VM tests."""
+
+ _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
+ "Sun Nov 6 22:49:02 2011\n"
+ "*filter\n"
+ ":INPUT ACCEPT [0:0]\n"
+ ":FORWARD ACCEPT [0:0]\n"
+ ":OUTPUT ACCEPT [0:0]\n"
+ "COMMIT\n"
+ "# Completed on Sun Nov 6 22:49:02 2011\n")
+
+ def host_call_plugin(self, _1, _2, plugin, method, _5):
+ if (plugin, method) == ('glance', 'download_vhd'):
+ root_uuid = _make_fake_vdi()
+ return pickle.dumps(dict(root=dict(uuid=root_uuid)))
+ elif (plugin, method) == ("xenhost", "iptables_config"):
+ return fake.as_json(out=self._fake_iptables_save_output,
+ err='')
+ else:
+ return (super(FakeSessionForVMTests, self).
+ host_call_plugin(_1, _2, plugin, method, _5))
+
+ def VM_start(self, _1, ref, _2, _3):
+ vm = fake.get_record('VM', ref)
+ if vm['power_state'] != 'Halted':
+ raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
+ vm['power_state']])
+ vm['power_state'] = 'Running'
+ vm['is_a_template'] = False
+ vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
+ return vm
+
+ def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
+ vm_rec = self.VM_start(_1, vm_ref, _2, _3)
+ vm_rec['resident_on'] = host_ref
+
+ def VDI_snapshot(self, session_ref, vm_ref, _1):
+ sr_ref = "fakesr"
+ return fake.create_vdi('fakelabel', sr_ref, read_only=True)
+
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+
+class FakeSessionForFirewallTests(FakeSessionForVMTests):
+ """Stubs out a XenApi Session for doing IPTable Firewall tests."""
+
+ def __init__(self, uri, test_case=None):
+ super(FakeSessionForFirewallTests, self).__init__(uri)
+ if hasattr(test_case, '_in_rules'):
+ self._in_rules = test_case._in_rules
+ if hasattr(test_case, '_in6_filter_rules'):
+ self._in6_filter_rules = test_case._in6_filter_rules
+ self._test_case = test_case
+
+ def host_call_plugin(self, _1, _2, plugin, method, args):
+ """Mock method four host_call_plugin to be used in unit tests
+ for the dom0 iptables Firewall drivers for XenAPI
+
+ """
+ if plugin == "xenhost" and method == "iptables_config":
+ # The command to execute is a json-encoded list
+ cmd_args = args.get('cmd_args', None)
+ cmd = jsonutils.loads(cmd_args)
+ if not cmd:
+ ret_str = ''
+ else:
+ output = ''
+ process_input = args.get('process_input', None)
+ if cmd == ['ip6tables-save', '-c']:
+ output = '\n'.join(self._in6_filter_rules)
+ if cmd == ['iptables-save', '-c']:
+ output = '\n'.join(self._in_rules)
+ if cmd == ['iptables-restore', '-c', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ if self._test_case is not None:
+ self._test_case._out_rules = lines
+ output = '\n'.join(lines)
+ if cmd == ['ip6tables-restore', '-c', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ output = '\n'.join(lines)
+ ret_str = fake.as_json(out=output, err='')
+ return ret_str
+ else:
+ return (super(FakeSessionForVMTests, self).
+ host_call_plugin(_1, _2, plugin, method, args))
+
+
+def stub_out_vm_methods(stubs):
+ def fake_acquire_bootlock(self, vm):
+ pass
+
+ def fake_release_bootlock(self, vm):
+ pass
+
+ def fake_generate_ephemeral(*args):
+ pass
+
+ def fake_wait_for_device(dev):
+ pass
+
+ stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
+ stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+ stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device)
+
+
+class FakeSessionForVolumeTests(fake.SessionBase):
+ """Stubs out a XenAPISession for Volume tests."""
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ valid_vdi = False
+ refs = fake.get_all('VDI')
+ for ref in refs:
+ rec = fake.get_record('VDI', ref)
+ if rec['uuid'] == uuid:
+ valid_vdi = True
+ if not valid_vdi:
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+
+class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
+ """Stubs out a XenAPISession for Volume tests: it injects failures."""
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ # This is for testing failure
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+ def PBD_unplug(self, _1, ref):
+ rec = fake.get_record('PBD', ref)
+ rec['currently-attached'] = False
+
+ def SR_forget(self, _1, ref):
+ pass
+
+
+def stub_out_migration_methods(stubs):
+ fakesr = fake.create_sr()
+
+ def fake_import_all_migrated_disks(session, instance):
+ vdi_ref = fake.create_vdi(instance['name'], fakesr)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ vdi_rec['other_config']['nova_disk_type'] = 'root'
+ return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref},
+ "ephemerals": {}}
+
+ def fake_wait_for_instance_to_start(self, *args):
+ pass
+
+ def fake_get_vdi(session, vm_ref, userdevice='0'):
+ vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
+ vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
+ vdi_ref = fake.create_vdi('derp', fakesr,
+ sm_config={'vhd-parent': vdi_rec_parent['uuid']})
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ return vdi_ref, vdi_rec
+
+ def fake_sr(session, *args):
+ return fakesr
+
+ def fake_get_sr_path(*args):
+ return "fake"
+
+ def fake_destroy(*args, **kwargs):
+ pass
+
+ def fake_generate_ephemeral(*args):
+ pass
+
+ stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+ stubs.Set(vmops.VMOps, '_wait_for_instance_to_start',
+ fake_wait_for_instance_to_start)
+ stubs.Set(vm_utils, 'import_all_migrated_disks',
+ fake_import_all_migrated_disks)
+ stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+
+class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
+ def VM_assert_can_migrate(self, session, vmref, migrate_data,
+ live, vdi_map, vif_map, options):
+ raise fake.Failure("XenAPI VM.assert_can_migrate failed")
+
+ def host_migrate_receive(self, session, hostref, networkref, options):
+ raise fake.Failure("XenAPI host.migrate_receive failed")
+
+ def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
+ vif_map, options):
+ raise fake.Failure("XenAPI VM.migrate_send failed")
+
+
+# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
+# over to use XenAPITestBaseNoDB
+class XenAPITestBase(test.TestCase):
+ def setUp(self):
+ super(XenAPITestBase, self).setUp()
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
+ fake.reset()
+
+
+class XenAPITestBaseNoDB(test.NoDBTestCase):
+ def setUp(self):
+ super(XenAPITestBaseNoDB, self).setUp()
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
+ fake.reset()
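stubout_session is the hinge most of these suites turn on: rather than
opening a live XenAPI connection, XenAPISession._create_session is replaced
with a constructor for one of the fake session classes above. A typical
setUp body using it, mirroring TestGlanceStore earlier in this change:

    # illustrative test setUp, following the pattern used above
    fake.reset()
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    driver = xenapi_conn.XenAPIDriver(False)
    self.session = driver._session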
diff --git a/nova/tests/unit/virt/xenapi/test_agent.py b/nova/tests/unit/virt/xenapi/test_agent.py
new file mode 100644
index 0000000000..5004b381d4
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_agent.py
@@ -0,0 +1,468 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import time
+import uuid
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.xenapi import agent
+from nova.virt.xenapi import fake as xenapi_fake
+
+
+def _get_fake_instance(**kwargs):
+ system_metadata = []
+ for k, v in kwargs.items():
+ system_metadata.append({
+ "key": k,
+ "value": v
+ })
+
+ return {
+ "system_metadata": system_metadata,
+ "uuid": "uuid",
+ "key_data": "ssh-rsa asdf",
+ "os_type": "asdf",
+ }
+
+
+class AgentTestCaseBase(test.NoDBTestCase):
+ def _create_agent(self, instance, session="session"):
+ self.session = session
+ self.virtapi = "virtapi"
+ self.vm_ref = "vm_ref"
+ return agent.XenAPIBasedAgent(self.session, self.virtapi,
+ instance, self.vm_ref)
+
+
+class AgentImageFlagsTestCase(AgentTestCaseBase):
+ def test_agent_is_present(self):
+ self.flags(use_agent_default=False, group='xenserver')
+ instance = {"system_metadata":
+ [{"key": "image_xenapi_use_agent", "value": "true"}]}
+ self.assertTrue(agent.should_use_agent(instance))
+
+ def test_agent_is_disabled(self):
+ self.flags(use_agent_default=True, group='xenserver')
+ instance = {"system_metadata":
+ [{"key": "image_xenapi_use_agent", "value": "false"}]}
+ self.assertFalse(agent.should_use_agent(instance))
+
+ def test_agent_uses_default_when_prop_invalid(self):
+ self.flags(use_agent_default=True, group='xenserver')
+ instance = {"system_metadata":
+ [{"key": "image_xenapi_use_agent", "value": "bob"}],
+ "uuid": "uuid"}
+ self.assertTrue(agent.should_use_agent(instance))
+
+ def test_agent_default_not_present(self):
+ self.flags(use_agent_default=False, group='xenserver')
+ instance = {"system_metadata": []}
+ self.assertFalse(agent.should_use_agent(instance))
+
+ def test_agent_default_present(self):
+ self.flags(use_agent_default=True, group='xenserver')
+ instance = {"system_metadata": []}
+ self.assertTrue(agent.should_use_agent(instance))
+
+
+class SysMetaKeyTestBase(object):
+ key = None
+
+ def _create_agent_with_value(self, value):
+ kwargs = {self.key: value}
+ instance = _get_fake_instance(**kwargs)
+ return self._create_agent(instance)
+
+ def test_get_sys_meta_key_true(self):
+ agent = self._create_agent_with_value("true")
+ self.assertTrue(agent._get_sys_meta_key(self.key))
+
+ def test_get_sys_meta_key_false(self):
+ agent = self._create_agent_with_value("False")
+ self.assertFalse(agent._get_sys_meta_key(self.key))
+
+ def test_get_sys_meta_key_invalid_is_false(self):
+ agent = self._create_agent_with_value("invalid")
+ self.assertFalse(agent._get_sys_meta_key(self.key))
+
+ def test_get_sys_meta_key_missing_is_false(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.assertFalse(agent._get_sys_meta_key(self.key))
+
+
+class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
+ key = "image_xenapi_skip_agent_inject_ssh"
+
+ def test_skip_ssh_key_inject(self):
+ agent = self._create_agent_with_value("True")
+ self.assertTrue(agent._skip_ssh_key_inject())
+
+
+class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
+ key = "image_xenapi_skip_agent_inject_files_at_boot"
+
+ def test_skip_inject_files_at_boot(self):
+ agent = self._create_agent_with_value("True")
+ self.assertTrue(agent._skip_inject_files_at_boot())
+
+
+class InjectSshTestCase(AgentTestCaseBase):
+ def test_inject_ssh_key_succeeds(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "inject_file")
+
+ agent.inject_file("/root/.ssh/authorized_keys",
+ "\n# The following ssh key was injected by Nova"
+ "\nssh-rsa asdf\n")
+
+ self.mox.ReplayAll()
+ agent.inject_ssh_key()
+
+ def _test_inject_ssh_key_skipped(self, instance):
+ agent = self._create_agent(instance)
+
+ # ensure inject_file is not called
+ self.mox.StubOutWithMock(agent, "inject_file")
+ self.mox.ReplayAll()
+
+ agent.inject_ssh_key()
+
+ def test_inject_ssh_key_skipped_no_key_data(self):
+ instance = _get_fake_instance()
+ instance["key_data"] = None
+ self._test_inject_ssh_key_skipped(instance)
+
+ def test_inject_ssh_key_skipped_windows(self):
+ instance = _get_fake_instance()
+ instance["os_type"] = "windows"
+ self._test_inject_ssh_key_skipped(instance)
+
+ def test_inject_ssh_key_skipped_cloud_init_present(self):
+ instance = _get_fake_instance(
+ image_xenapi_skip_agent_inject_ssh="True")
+ self._test_inject_ssh_key_skipped(instance)
+
+
+class FileInjectionTestCase(AgentTestCaseBase):
+ def test_inject_file(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "_call_agent")
+
+ b64_path = base64.b64encode('path')
+ b64_contents = base64.b64encode('contents')
+ agent._call_agent('inject_file',
+ {'b64_contents': b64_contents,
+ 'b64_path': b64_path})
+
+ self.mox.ReplayAll()
+
+ agent.inject_file("path", "contents")
+
+ def test_inject_files(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "inject_file")
+
+ files = [("path1", "content1"), ("path2", "content2")]
+ agent.inject_file(*files[0])
+ agent.inject_file(*files[1])
+
+ self.mox.ReplayAll()
+
+ agent.inject_files(files)
+
+ def test_inject_files_skipped_when_cloud_init_installed(self):
+ instance = _get_fake_instance(
+ image_xenapi_skip_agent_inject_files_at_boot="True")
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "inject_file")
+
+ files = [("path1", "content1"), ("path2", "content2")]
+
+ self.mox.ReplayAll()
+
+ agent.inject_files(files)
+
+
+class FakeRebootException(Exception):
+ details = ["", "", "", "asdf REBOOT: asdf"]
+
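+# NOTE: a XenAPI.Failure whose details include the "REBOOT:" marker is the
+# dom0 agent plugin's way of reporting that the domain rebooted mid-call;
+# the tests below assume _call_agent treats this as retryable and waits,
+# via _wait_for_new_dom_id, for the domain id to change before retrying.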
+
+class RebootRetryTestCase(AgentTestCaseBase):
+ @mock.patch.object(agent, '_wait_for_new_dom_id')
+ def test_retry_on_reboot(self, mock_wait):
+ mock_session = mock.Mock()
+
+ def fake_call_plugin(*args, **kwargs):
+ if fake_call_plugin.called:
+ return {"returncode": '0', "message": "done"}
+ else:
+ fake_call_plugin.called = True
+ raise FakeRebootException()
+
+ fake_call_plugin.called = False
+ mock_session.XenAPI.Failure = FakeRebootException
+ mock_session.VM.get_domid.return_value = "fake_dom_id"
+ mock_session.call_plugin.side_effect = fake_call_plugin
+
+ agent = self._create_agent(None, mock_session)
+
+ result = agent._call_agent("asdf")
+ self.assertEqual("done", result)
+ self.assertTrue(mock_session.VM.get_domid.called)
+ self.assertEqual(2, mock_session.call_plugin.call_count)
+ mock_wait.assert_called_once_with(mock_session, self.vm_ref,
+ "fake_dom_id", "asdf")
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep):
+ mock_session = mock.Mock()
+ mock_session.VM.get_domid.return_value = "new"
+
+ agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method")
+
+ mock_session.VM.get_domid.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_sleep.called)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep):
+ self.flags(agent_timeout=3, group="xenserver")
+ mock_time.return_value = 0
+ mock_session = mock.Mock()
+ old = 40
+ new = 42
+ mock_session.VM.get_domid.side_effect = [old, -1, new]
+
+ agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method")
+
+ mock_session.VM.get_domid.assert_called_with("vm_ref")
+ self.assertEqual(3, mock_session.VM.get_domid.call_count)
+ self.assertEqual(2, mock_sleep.call_count)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep):
+ self.flags(agent_timeout=3, group="xenserver")
+
+ def fake_time():
+ fake_time.time = fake_time.time + 1
+ return fake_time.time
+
+ fake_time.time = 0
+ mock_time.side_effect = fake_time
+ mock_session = mock.Mock()
+ mock_session.VM.get_domid.return_value = "old"
+
+ self.assertRaises(exception.AgentTimeout,
+ agent._wait_for_new_dom_id,
+ mock_session, "vm_ref", "old", "method")
+
+ self.assertEqual(4, mock_session.VM.get_domid.call_count)
+
+
+class SetAdminPasswordTestCase(AgentTestCaseBase):
+ @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
+ @mock.patch("nova.virt.xenapi.agent.SimpleDH")
+ def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent):
+ agent = self._create_agent(None)
+ instance_mock = mock_simple_dh()
+ instance_mock.get_public.return_value = 4321
+ mock_call_agent.return_value = "1234"
+
+ result = agent._exchange_key_with_agent()
+
+ mock_call_agent.assert_called_once_with('key_init', {"pub": "4321"},
+ success_codes=['D0'],
+ ignore_errors=False)
+ result.compute_shared.assert_called_once_with(1234)
+
+ @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
+ @mock.patch.object(agent.XenAPIBasedAgent,
+ '_save_instance_password_if_sshkey_present')
+ @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
+ def test_set_admin_password_works(self, mock_exchange, mock_save,
+ mock_call_agent):
+ mock_dh = mock.Mock(spec_set=agent.SimpleDH)
+ mock_dh.encrypt.return_value = "enc_pass"
+ mock_exchange.return_value = mock_dh
+ agent_inst = self._create_agent(None)
+
+ agent_inst.set_admin_password("new_pass")
+
+ mock_dh.encrypt.assert_called_once_with("new_pass\n")
+ mock_call_agent.assert_called_once_with('password',
+ {'enc_pass': 'enc_pass'})
+ mock_save.assert_called_once_with("new_pass")
+
+ @mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault')
+ @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
+ def test_set_admin_password_silently_fails(self, mock_exchange,
+ mock_add_fault):
+ error = exception.AgentTimeout(method="fake")
+ mock_exchange.side_effect = error
+ agent_inst = self._create_agent(None)
+
+ agent_inst.set_admin_password("new_pass")
+
+ mock_add_fault.assert_called_once_with(error, mock.ANY)
+
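+# NOTE: the set_admin_password flow exercised above is, in outline, a
+# Diffie-Hellman exchange with the in-guest agent; a sketch of what these
+# tests assume (illustrative only):
+#
+#     dh = SimpleDH()
+#     resp = self._call_agent('key_init', {'pub': str(dh.get_public())},
+#                             success_codes=['D0'], ignore_errors=False)
+#     dh.compute_shared(int(resp))  # resp carries the agent's public key
+#     enc_pass = dh.encrypt(new_pass + '\n')
+#     self._call_agent('password', {'enc_pass': enc_pass})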
+
+class UpgradeRequiredTestCase(test.NoDBTestCase):
+ def test_less_than(self):
+ self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5'))
+
+ def test_greater_than(self):
+ self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4'))
+
+ def test_equal(self):
+ self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4'))
+
+ def test_non_lexical(self):
+ self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4'))
+
+ def test_length(self):
+ self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4'))
+
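+# NOTE: a minimal sketch of the numeric, per-component comparison these
+# tests expect from is_upgrade_required (illustrative, not nova's code):
+#
+#     def is_upgrade_required(current, available):
+#         def to_tuple(version):
+#             return [int(part) for part in version.split('.')]
+#         return to_tuple(current) < to_tuple(available)
+#
+# so '1.2.3.10' is newer than '1.2.3.4' (non-lexical), and a shorter
+# version such as '1.2.3' compares older than '1.2.3.4'.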
+
+@mock.patch.object(uuid, "uuid4")
+class CallAgentTestCase(AgentTestCaseBase):
+ def test_call_agent_success(self, mock_uuid):
+ session = mock.Mock()
+ instance = {"uuid": "fake"}
+ addl_args = {"foo": "bar"}
+
+ session.VM.get_domid.return_value = '42'
+ mock_uuid.return_value = 1
+ session.call_plugin.return_value = {'returncode': '4',
+ 'message': "asdf\\r\\n"}
+
+ self.assertEqual("asdf",
+ agent._call_agent(session, instance, "vm_ref",
+ "method", addl_args, timeout=300,
+ success_codes=['0', '4']))
+
+ expected_args = {
+ 'id': '1',
+ 'dom_id': '42',
+ 'timeout': '300',
+ }
+ expected_args.update(addl_args)
+ session.VM.get_domid.assert_called_once_with("vm_ref")
+ session.call_plugin.assert_called_once_with("agent", "method",
+ expected_args)
+
+ def _call_agent_setup(self, session, mock_uuid,
+ returncode='0', success_codes=None,
+ exception=None):
+ session.XenAPI.Failure = xenapi_fake.Failure
+ instance = {"uuid": "fake"}
+
+ session.VM.get_domid.return_value = 42
+ mock_uuid.return_value = 1
+ if exception:
+ session.call_plugin.side_effect = exception
+ else:
+ session.call_plugin.return_value = {'returncode': returncode,
+ 'message': "asdf\\r\\n"}
+
+ return agent._call_agent(session, instance, "vm_ref", "method",
+ success_codes=success_codes)
+
+ def _assert_agent_called(self, session, mock_uuid):
+ expected_args = {
+ 'id': '1',
+ 'dom_id': '42',
+ 'timeout': '30',
+ }
+ session.call_plugin.assert_called_once_with("agent", "method",
+ expected_args)
+ session.VM.get_domid.assert_called_once_with("vm_ref")
+
+ def test_call_agent_works_with_defaults(self, mock_uuid):
+ session = mock.Mock()
+ self._call_agent_setup(session, mock_uuid)
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_timeout(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentTimeout, self._call_agent_setup,
+ session, mock_uuid,
+ exception=xenapi_fake.Failure(["TIMEOUT:fake"]))
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_not_implemented(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentNotImplemented,
+ self._call_agent_setup,
+ session, mock_uuid,
+ exception=xenapi_fake.Failure(["NOT IMPLEMENTED:"]))
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_other_error(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentError, self._call_agent_setup,
+ session, mock_uuid,
+ exception=xenapi_fake.Failure(["asdf"]))
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_returned_error(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentError, self._call_agent_setup,
+ session, mock_uuid, returncode='42')
+ self._assert_agent_called(session, mock_uuid)
+
+
+class XenAPIBasedAgentTestCase(AgentTestCaseBase):
+ @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
+ @mock.patch.object(agent, "_call_agent")
+ def test_call_agent_swallows_error(self, mock_call_agent,
+ mock_add_instance_fault):
+ fake_error = exception.AgentError(method="bob")
+ mock_call_agent.side_effect = fake_error
+
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+
+ agent._call_agent("bob")
+
+ mock_call_agent.assert_called_once_with(agent.session, agent.instance,
+ agent.vm_ref, "bob", None, None, None)
+ mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY)
+
+ @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
+ @mock.patch.object(agent, "_call_agent")
+ def test_call_agent_throws_error(self, mock_call_agent,
+ mock_add_instance_fault):
+ fake_error = exception.AgentError(method="bob")
+ mock_call_agent.side_effect = fake_error
+
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+
+ self.assertRaises(exception.AgentError, agent._call_agent,
+ "bob", ignore_errors=False)
+
+ mock_call_agent.assert_called_once_with(agent.session, agent.instance,
+ agent.vm_ref, "bob", None, None, None)
+ self.assertFalse(mock_add_instance_fault.called)
diff --git a/nova/tests/unit/virt/xenapi/test_driver.py b/nova/tests/unit/virt/xenapi/test_driver.py
new file mode 100644
index 0000000000..eb3e02f29e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_driver.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2013 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import math
+
+import mock
+from oslo.utils import units
+
+from nova.compute import arch
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import xenapi
+from nova.virt.xenapi import driver as xenapi_driver
+
+
+class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for Driver operations."""
+
+ def _get_driver(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.flags(connection_url='test_url',
+ connection_password='test_pass', group='xenserver')
+ return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def host_stats(self, refresh=True):
+ return {'host_memory_total': 3 * units.Mi,
+ 'host_memory_free_computed': 2 * units.Mi,
+ 'disk_total': 5 * units.Gi,
+ 'disk_used': 2 * units.Gi,
+ 'disk_allocated': 4 * units.Gi,
+ 'host_hostname': 'somename',
+ 'supported_instances': arch.X86_64,
+ 'host_cpu_info': {'cpu_count': 50},
+ 'vcpus_used': 10,
+ 'pci_passthrough_devices': ''}
+
+ def test_available_resource(self):
+ driver = self._get_driver()
+ driver._session.product_version = (6, 8, 2)
+
+ self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
+
+ resources = driver.get_available_resource(None)
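+ # product_version (6, 8, 2) is presumably packed per component as
+ # major * 10**6 + minor * 10**3 + micro, giving the 6008002 below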
+ self.assertEqual(6008002, resources['hypervisor_version'])
+ self.assertEqual(50, resources['vcpus'])
+ self.assertEqual(3, resources['memory_mb'])
+ self.assertEqual(5, resources['local_gb'])
+ self.assertEqual(10, resources['vcpus_used'])
+ self.assertEqual(3 - 2, resources['memory_mb_used'])
+ self.assertEqual(2, resources['local_gb_used'])
+ self.assertEqual('xen', resources['hypervisor_type'])
+ self.assertEqual('somename', resources['hypervisor_hostname'])
+ self.assertEqual(1, resources['disk_available_least'])
+
+ def test_overhead(self):
+ driver = self._get_driver()
+ instance = {'memory_mb': 30720, 'vcpus': 4}
+
+ # expected memory overhead per:
+ # https://wiki.openstack.org/wiki/XenServer/Overhead
+ expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
+ (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
+ xenapi_driver.OVERHEAD_BASE)
+ expected = math.ceil(expected)
+ overhead = driver.estimate_instance_overhead(instance)
+ self.assertEqual(expected, overhead['memory_mb'])
+
+ def test_set_bootable(self):
+ driver = self._get_driver()
+
+ self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
+ driver._vmops.set_bootable('inst', True)
+ self.mox.ReplayAll()
+
+ driver.set_bootable('inst', True)
+
+ def test_post_interrupted_snapshot_cleanup(self):
+ driver = self._get_driver()
+ fake_vmops_cleanup = mock.Mock()
+ driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
+
+ driver.post_interrupted_snapshot_cleanup("context", "instance")
+
+ fake_vmops_cleanup.assert_called_once_with("context", "instance")
+
+ def test_public_api_signatures(self):
+ inst = self._get_driver()
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
diff --git a/nova/tests/unit/virt/xenapi/test_network_utils.py b/nova/tests/unit/virt/xenapi/test_network_utils.py
new file mode 100644
index 0000000000..5aa660f2a7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_network_utils.py
@@ -0,0 +1,76 @@
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import network_utils
+
+
+class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_network_with_name_label_works(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net"]
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertEqual("net", result)
+ session.network.get_by_name_label.assert_called_once_with("label")
+
+ def test_find_network_with_name_returns_none(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = []
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertIsNone(result)
+
+ def test_find_network_with_name_label_raises(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net", "net2"]
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_name_label,
+ session, "label")
+
+ def test_find_network_with_bridge_works(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {"net": "asdf"}
+
+ result = network_utils.find_network_with_bridge(session, "bridge")
+
+ self.assertEqual(result, "net")
+ expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"'
+ session.network.get_all_records_where.assert_called_once_with(expr)
+
+ def test_find_network_with_bridge_raises_too_many(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {
+ "net": "asdf",
+ "net2": "asdf2"
+ }
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
+
+ def test_find_network_with_bridge_raises_no_networks(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {}
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
diff --git a/nova/tests/unit/virt/xenapi/test_vm_utils.py b/nova/tests/unit/virt/xenapi/test_vm_utils.py
new file mode 100644
index 0000000000..ac54bd1480
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,2422 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import uuid
+
+from eventlet import greenthread
+import fixtures
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import vm_mode
+from nova import context
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common.fixture import config as config_fixture
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.tests.unit.virt.xenapi import test_xenapi
+from nova import utils
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+
+CONF = cfg.CONF
+XENSM_TYPE = 'xensm'
+ISCSI_TYPE = 'iscsi'
+
+
+def get_fake_connection_data(sr_type):
+ fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
+ 'name_label': 'fake_storage',
+ 'name_description': 'test purposes',
+ 'server': 'myserver',
+ 'serverpath': '/local/scratch/myname',
+ 'sr_type': 'nfs',
+ 'introduce_sr_keys': ['server',
+ 'serverpath',
+ 'sr_type'],
+ 'vdi_uuid': 'falseVDI'},
+ ISCSI_TYPE: {'volume_id': 'fake_volume_id',
+ 'target_lun': 1,
+ 'target_iqn': 'fake_iqn:volume-fake_volume_id',
+ 'target_portal': u'localhost:3260',
+ 'target_discovered': False}, }
+ return fakes[sr_type]
+
+
+def _get_fake_session(error=None):
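+ """Return a mock session; when `error` is given, call_xenapi raises
+ a fake XenAPI.Failure carrying `error` as details[0]."""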
+ session = mock.Mock()
+ xenapi_session.apply_session_helpers(session)
+
+ if error is not None:
+ class FakeException(Exception):
+ details = [error, "a", "b", "c"]
+
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+
+ return session
+
+
+@contextlib.contextmanager
+def contextified(result):
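+ """Yield `result` once, so a plain value can stand in for a context
+ manager (e.g. vdi_attached_here) in mox expectations."""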
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
+ pass
+
+
+class LookupTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(LookupTestCase, self).setUp()
+ self.session = self.mox.CreateMockAnything('Fake Session')
+ self.name_label = 'my_vm'
+
+ def _do_mock(self, result):
+ self.session.call_xenapi(
+ "VM.get_by_name_label", self.name_label).AndReturn(result)
+ self.mox.ReplayAll()
+
+ def test_normal(self):
+ self._do_mock(['x'])
+ result = vm_utils.lookup(self.session, self.name_label)
+ self.assertEqual('x', result)
+
+ def test_no_result(self):
+ self._do_mock([])
+ result = vm_utils.lookup(self.session, self.name_label)
+ self.assertIsNone(result)
+
+ def test_too_many(self):
+ self._do_mock(['a', 'b'])
+ self.assertRaises(exception.InstanceExists,
+ vm_utils.lookup,
+ self.session, self.name_label)
+
+ def test_rescue_none(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
+ self._do_mock(['x'])
+ result = vm_utils.lookup(self.session, self.name_label,
+ check_rescue=True)
+ self.assertEqual('x', result)
+
+ def test_rescue_found(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label",
+ self.name_label + '-rescue').AndReturn(['y'])
+ self.mox.ReplayAll()
+ result = vm_utils.lookup(self.session, self.name_label,
+ check_rescue=True)
+ self.assertEqual('y', result)
+
+ def test_rescue_too_many(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label",
+ self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InstanceExists,
+ vm_utils.lookup,
+ self.session, self.name_label,
+ check_rescue=True)
+
+
+class GenerateConfigDriveTestCase(VMUtilsTestBase):
+ def test_no_admin_pass(self):
+ instance = {}
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * units.Mi).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(_self, instance, content=None, extra_md=None,
+ network_info=None):
+ self.assertEqual(network_info, "nw_info")
+
+ def metadata_for_config_drive(_self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice', "nw_info")
+
+ @mock.patch.object(vm_utils, "destroy_vdi")
+ @mock.patch.object(vm_utils, "vdi_attached_here")
+ @mock.patch.object(vm_utils, "create_vdi")
+ @mock.patch.object(vm_utils, "safe_find_sr")
+ def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
+ mock_destroy):
+ mock_create_vdi.return_value = 'vdi_ref'
+ mock_attached.side_effect = test.TestingException
+ mock_destroy.side_effect = exception.StorageError(reason="")
+
+ instance = {"uuid": "asdf"}
+ self.assertRaises(test.TestingException,
+ vm_utils.generate_configdrive,
+ 'session', instance, 'vm_ref', 'userdevice',
+ 'nw_info')
+ mock_destroy.assert_called_once_with('session', 'vdi_ref')
+
+
+class XenAPIGetUUID(VMUtilsTestBase):
+ def test_get_this_vm_uuid_new_kernel(self):
+ self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
+
+ vm_utils._get_sys_hypervisor_uuid().AndReturn(
+ '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
+
+ self.mox.ReplayAll()
+ self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
+ vm_utils.get_this_vm_uuid(None))
+ self.mox.VerifyAll()
+
+ def test_get_this_vm_uuid_old_kernel_reboot(self):
+ self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ vm_utils._get_sys_hypervisor_uuid().AndRaise(
+ IOError(13, 'Permission denied'))
+ utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
+ ('27', ''))
+ utils.execute('xenstore-read', '/local/domain/27/vm',
+ run_as_root=True).AndReturn(
+ ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
+
+ self.mox.ReplayAll()
+ self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
+ vm_utils.get_this_vm_uuid(None))
+ self.mox.VerifyAll()
+
+
+class FakeSession(object):
+ def call_xenapi(self, *args):
+ pass
+
+ def call_plugin(self, *args):
+ pass
+
+ def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
+ pass
+
+ def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
+ callback, *args, **kwargs):
+ pass
+
+
+class FetchVhdImageTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(FetchVhdImageTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.context.auth_token = 'auth_token'
+ self.session = FakeSession()
+ self.instance = {"uuid": "uuid"}
+
+ self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
+ vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
+
+ self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
+ vm_utils.get_sr_path(self.session).AndReturn('sr_path')
+
+ def _stub_glance_download_vhd(self, raise_exc=None):
+ self.mox.StubOutWithMock(
+ self.session, 'call_plugin_serialized_with_retry')
+ func = self.session.call_plugin_serialized_with_retry(
+ 'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
+ extra_headers={'X-Service-Catalog': '[]',
+ 'X-Auth-Token': 'auth_token',
+ 'X-Roles': '',
+ 'X-Tenant-Id': None,
+ 'X-User-Id': None,
+ 'X-Identity-Status': 'Confirmed'},
+ image_id='image_id',
+ uuid_stack=["uuid_stack"],
+ sr_path='sr_path')
+
+ if raise_exc:
+ func.AndRaise(raise_exc)
+ else:
+ func.AndReturn({'root': {'uuid': 'vdi'}})
+
+ def _stub_bittorrent_download_vhd(self, raise_exc=None):
+ self.mox.StubOutWithMock(
+ self.session, 'call_plugin_serialized')
+ func = self.session.call_plugin_serialized(
+ 'bittorrent', 'download_vhd',
+ image_id='image_id',
+ uuid_stack=["uuid_stack"],
+ sr_path='sr_path',
+ torrent_download_stall_cutoff=600,
+ torrent_listen_port_start=6881,
+ torrent_listen_port_end=6891,
+ torrent_max_last_accessed=86400,
+ torrent_max_seeder_processes_per_host=1,
+ torrent_seed_chance=1.0,
+ torrent_seed_duration=3600,
+ torrent_url='http://foo/image_id.torrent'
+ )
+ if raise_exc:
+ func.AndRaise(raise_exc)
+ else:
+ func.AndReturn({'root': {'uuid': 'vdi'}})
+
+ def test_fetch_vhd_image_works_with_glance(self):
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(
+ self.context, self.session, self.instance, "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_fetch_vhd_image_works_with_bittorrent(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(True)
+
+ self._stub_bittorrent_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi").AndRaise(exception.FlavorDiskTooSmall)
+
+ self.mox.StubOutWithMock(self.session, 'call_xenapi')
+ self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
+
+ self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
+ vm_utils.destroy_vdi(self.session,
+ "ref").AndRaise(exception.StorageError(reason=""))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ vm_utils._fetch_vhd_image, self.context, self.session,
+ self.instance, 'image_id')
+
+ self.mox.VerifyAll()
+
+ def test_fallback_to_default_handler(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(True)
+
+ self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
+
+ vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
+ vm_utils.get_sr_path(self.session).AndReturn('sr_path')
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_default_handler_does_not_fallback_to_itself(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd(raise_exc=RuntimeError)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
+ self.context, self.session, self.instance, 'image_id')
+
+ self.mox.VerifyAll()
+
+
+class TestImageCompression(VMUtilsTestBase):
+ def test_image_compression(self):
+ # Testing for nova.conf, too low, negative, and a correct value.
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=0, group='xenserver')
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=-6, group='xenserver')
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=6, group='xenserver')
+ self.assertEqual(vm_utils.get_compression_level(), 6)
+
+
+class ResizeHelpersTestCase(VMUtilsTestBase):
+ def test_repair_filesystem(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ utils.execute('e2fsck', '-f', "-y", "fakepath",
+ run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
+ ("size is: 42", ""))
+
+ self.mox.ReplayAll()
+
+ vm_utils._repair_filesystem("fakepath")
+
+ def _call_tune2fs_remove_journal(self, path):
+ utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
+
+ def _call_tune2fs_add_journal(self, path):
+ utils.execute("tune2fs", "-j", path, run_as_root=True)
+
+ def _call_parted_mkpart(self, path, start, end):
+ utils.execute('parted', '--script', path, 'rm', '1',
+ run_as_root=True)
+ utils.execute('parted', '--script', path, 'mkpart',
+ 'primary', '%ds' % start, '%ds' % end, run_as_root=True)
+
+ def _call_parted_boot_flag(self, path):
+ utils.execute('parted', '--script', path, 'set', '1',
+ 'boot', 'on', run_as_root=True)
+
+ def test_resize_part_and_fs_down_succeeds(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
+ self._call_parted_mkpart(dev_path, 0, 9)
+ self._call_parted_boot_flag(dev_path)
+ self._call_tune2fs_add_journal(partition_path)
+
+ self.mox.ReplayAll()
+
+ vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
+
+ def test_log_progress_if_required(self):
+ self.mox.StubOutWithMock(vm_utils.LOG, "debug")
+ vm_utils.LOG.debug(_("Sparse copy in progress, "
+ "%(complete_pct).2f%% complete. "
+ "%(left)s bytes left to copy"),
+ {"complete_pct": 50.0, "left": 1})
+ current = timeutils.utcnow()
+ timeutils.set_time_override(current)
+ timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
+ self.mox.ReplayAll()
+ vm_utils._log_progress_if_required(1, current, 2)
+
+ def test_log_progress_if_not_required(self):
+ self.mox.StubOutWithMock(vm_utils.LOG, "debug")
+ current = timeutils.utcnow()
+ timeutils.set_time_override(current)
+ timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
+ self.mox.ReplayAll()
+ vm_utils._log_progress_if_required(1, current, 2)
+
+ def test_resize_part_and_fs_down_fails_disk_too_big(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ new_sectors = 10
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ mobj = utils.execute("resize2fs",
+ partition_path,
+ "%ss" % new_sectors,
+ run_as_root=True)
+ mobj.AndRaise(processutils.ProcessExecutionError)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ResizeError,
+ vm_utils._resize_part_and_fs,
+ "fake", 0, 20, 10, "boot")
+
+ def test_resize_part_and_fs_up_succeeds(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ self._call_parted_mkpart(dev_path, 0, 29)
+ utils.execute("resize2fs", partition_path, run_as_root=True)
+ self._call_tune2fs_add_journal(partition_path)
+
+ self.mox.ReplayAll()
+
+ vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
+
+ def test_resize_disk_throws_on_zero_size(self):
+ self.assertRaises(exception.ResizeError,
+ vm_utils.resize_disk, "session", "instance", "vdi_ref",
+ {"root_gb": 0})
+
+ def test_auto_config_disk_returns_early_on_zero_size(self):
+ vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
+
+ @mock.patch.object(utils, "execute")
+ def test_get_partitions(self, mock_execute):
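+ # parted machine-readable output: a "BYT;" header, then one
+ # number:start:end:size:filesystem:name:flags; line per partition
+ # (sector units here), which _get_partitions presumably parses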
+ parted_return = "BYT;\n...\n"
+ parted_return += "1:2s:11s:10s:ext3::boot;\n"
+ parted_return += "2:20s:11s:10s::bob:;\n"
+ mock_execute.return_value = (parted_return, None)
+
+ partitions = vm_utils._get_partitions("abc")
+
+ self.assertEqual(2, len(partitions))
+ self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
+ self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
+
+
+class CheckVDISizeTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CheckVDISizeTestCase, self).setUp()
+ self.context = 'fakecontext'
+ self.session = 'fakesession'
+ self.instance = dict(uuid='fakeinstance')
+ self.vdi_uuid = 'fakeuuid'
+
+ def test_not_too_large(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=1))
+
+ self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
+ vm_utils._get_vdi_chain_size(self.session,
+ self.vdi_uuid).AndReturn(1073741824)
+
+ self.mox.ReplayAll()
+
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ self.vdi_uuid)
+
+ def test_too_large(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=1))
+
+ self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
+ vm_utils._get_vdi_chain_size(self.session,
+ self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ vm_utils._check_vdi_size, self.context, self.session,
+ self.instance, self.vdi_uuid)
+
+ def test_zero_root_gb_disables_check(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=0))
+
+ self.mox.ReplayAll()
+
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ self.vdi_uuid)
+
+
+class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GetInstanceForVdisForSrTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ def test_get_instance_vdis_for_sr(self):
+ vm_ref = fake.create_vm("foo", "Running")
+ sr_ref = fake.create_sr()
+
+ vdi_1 = fake.create_vdi('vdiname1', sr_ref)
+ vdi_2 = fake.create_vdi('vdiname2', sr_ref)
+
+ for vdi_ref in [vdi_1, vdi_2]:
+ fake.create_vbd(vm_ref, vdi_ref)
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.get_instance_vdis_for_sr(
+ driver._session, vm_ref, sr_ref))
+
+ self.assertEqual([vdi_1, vdi_2], result)
+
+ def test_get_instance_vdis_for_sr_no_vbd(self):
+ vm_ref = fake.create_vm("foo", "Running")
+ sr_ref = fake.create_sr()
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.get_instance_vdis_for_sr(
+ driver._session, vm_ref, sr_ref))
+
+ self.assertEqual([], result)
+
+
+class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
+
+ def test_lookup_call(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn('ignored')
+
+ mock.ReplayAll()
+ vm_utils.vm_ref_or_raise('session', 'somename')
+ mock.VerifyAll()
+
+ def test_return_value(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
+
+ mock.ReplayAll()
+ self.assertEqual(
+ 'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
+ mock.VerifyAll()
+
+
+class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
+
+ def test_exception_raised(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn(None)
+
+ mock.ReplayAll()
+ self.assertRaises(
+ exception.InstanceNotFound,
+ lambda: vm_utils.vm_ref_or_raise('session', 'somename')
+ )
+ mock.VerifyAll()
+
+ def test_exception_msg_contains_vm_name(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn(None)
+
+ mock.ReplayAll()
+ try:
+ vm_utils.vm_ref_or_raise('session', 'somename')
+ except exception.InstanceNotFound as e:
+ self.assertIn('somename', six.text_type(e))
+ mock.VerifyAll()
+
+
+@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
+class CreateCachedImageTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateCachedImageTestCase, self).setUp()
+ self.session = _get_fake_session()
+
+ @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
+ def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
+ self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
+ None, None, None, 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
+ def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
+ self.flags(use_cow_images=False)
+ self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
+ None, None, None, 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ def test_no_cow_no_ext(self, mock_safe_find_sr):
+ self.flags(use_cow_images=False)
+ self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
+ 'vdi_ref', None, None, None,
+ 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
+ @mock.patch.object(vm_utils, '_fetch_image',
+ return_value={'root': {'uuid': 'vdi_uuid',
+ 'file': None}})
+ def test_noncached(self, mock_fetch_image, mock_clone_vdi,
+ mock_safe_find_sr):
+ self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
+ None, None, None, None, None,
+ None, 'vdi_uuid']
+ self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+
+class BittorrentTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(BittorrentTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ def test_image_uses_bittorrent(self):
+ instance = {'system_metadata': {'image_bittorrent': True}}
+ self.flags(torrent_images='some', group='xenserver')
+ self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
+ instance))
+
+ def _test_create_image(self, cache_type):
+ instance = {'system_metadata': {'image_cache_in_nova': True}}
+ self.flags(cache_images=cache_type, group='xenserver')
+
+ was = {'called': None}
+
+ def fake_create_cached_image(*args):
+ was['called'] = 'some'
+ return (False, {})
+ self.stubs.Set(vm_utils, '_create_cached_image',
+ fake_create_cached_image)
+
+ def fake_fetch_image(*args):
+ was['called'] = 'none'
+ return {}
+ self.stubs.Set(vm_utils, '_fetch_image',
+ fake_fetch_image)
+
+ vm_utils.create_image(self.context, None, instance,
+ 'foo', 'bar', 'baz')
+
+ self.assertEqual(was['called'], cache_type)
+
+ def test_create_image_cached(self):
+ self._test_create_image('some')
+
+ def test_create_image_uncached(self):
+ self._test_create_image('none')
+
+
+class ShutdownTestCase(VMUtilsTestBase):
+
+ def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
+ self.mock = mox.Mox()
+ session = FakeSession()
+ instance = "instance"
+ vm_ref = "vm-ref"
+ self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
+ self.mock.StubOutWithMock(vm_utils, 'LOG')
+ self.assertTrue(vm_utils.hard_shutdown_vm(
+ session, instance, vm_ref))
+
+ def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
+ self.mock = mox.Mox()
+ session = FakeSession()
+ instance = "instance"
+ vm_ref = "vm-ref"
+ self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
+ self.mock.StubOutWithMock(vm_utils, 'LOG')
+ self.assertTrue(vm_utils.clean_shutdown_vm(
+ session, instance, vm_ref))
+
+
+class CreateVBDTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateVBDTestCase, self).setUp()
+ self.session = FakeSession()
+ self.mock = mox.Mox()
+ self.mock.StubOutWithMock(self.session, 'call_xenapi')
+ self.vbd_rec = self._generate_vbd_rec()
+
+ def _generate_vbd_rec(self):
+ vbd_rec = {}
+ vbd_rec['VM'] = 'vm_ref'
+ vbd_rec['VDI'] = 'vdi_ref'
+ vbd_rec['userdevice'] = '0'
+ vbd_rec['bootable'] = False
+ vbd_rec['mode'] = 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ return vbd_rec
+
+ def test_create_vbd_default_args(self):
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_create_vbd_osvol(self):
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
+ "osvol", "True")
+ self.mock.ReplayAll()
+ result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
+ osvol=True)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_create_vbd_extra_args(self):
+ self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
+ self.vbd_rec['type'] = 'a'
+ self.vbd_rec['mode'] = 'RO'
+ self.vbd_rec['bootable'] = True
+ self.vbd_rec['empty'] = True
+ self.vbd_rec['unpluggable'] = False
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
+ vbd_type="a", read_only=True, bootable=True,
+ empty=True, unpluggable=False)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_attach_cd(self):
+ self.mock.StubOutWithMock(vm_utils, 'create_vbd')
+
+ vm_utils.create_vbd(self.session, "vm_ref", None, 1,
+ vbd_type='cd', read_only=True, bootable=True,
+ empty=True, unpluggable=False).AndReturn("vbd_ref")
+ self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+
+class UnplugVbdTestCase(VMUtilsTestBase):
+ @mock.patch.object(greenthread, 'sleep')
+ def test_unplug_vbd_works(self, mock_sleep):
+ session = _get_fake_session()
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
+
+ session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
+ self.assertEqual(0, mock_sleep.call_count)
+
+ def test_unplug_vbd_raises_unexpected_error(self):
+ session = _get_fake_session()
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+ session.call_xenapi.side_effect = test.TestingException()
+
+ self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
+ session, vbd_ref, vm_ref)
+ self.assertEqual(1, session.call_xenapi.call_count)
+
+ def test_unplug_vbd_already_detached_works(self):
+ error = "DEVICE_ALREADY_DETACHED"
+ session = _get_fake_session(error)
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
+ self.assertEqual(1, session.call_xenapi.call_count)
+
+ def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
+ session = _get_fake_session("")
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
+ session, vbd_ref, vm_ref)
+ self.assertEqual(1, session.call_xenapi.call_count)
+
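+ # The helper below assumes unplug_vbd keeps retrying a transient
+ # failure with a sleep between attempts, giving up with StorageError
+ # after a fixed budget (11 calls and 10 sleeps, per the asserts).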
+ def _test_unplug_vbd_retries(self, mock_sleep, error):
+ session = _get_fake_session(error)
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
+ session, vbd_ref, vm_ref)
+
+ self.assertEqual(11, session.call_xenapi.call_count)
+ self.assertEqual(10, mock_sleep.call_count)
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_unplug_vbd_retries_on_rejected(self, mock_sleep):
+ self._test_unplug_vbd_retries(mock_sleep,
+ "DEVICE_DETACH_REJECTED")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_unplug_vbd_retries_on_internal_error(self, mock_sleep):
+ self._test_unplug_vbd_retries(mock_sleep,
+ "INTERNAL_ERROR")
+
+
+class VDIOtherConfigTestCase(VMUtilsTestBase):
+ """Tests to ensure that the code is populating VDI's `other_config`
+ attribute with the correct metadata.
+ """
+
+ def setUp(self):
+ super(VDIOtherConfigTestCase, self).setUp()
+
+ class _FakeSession(object):
+ def call_xenapi(self, operation, *args, **kwargs):
+ # VDI.add_to_other_config -> VDI_add_to_other_config
+ method = getattr(self, operation.replace('.', '_'), None)
+ if method:
+ return method(*args, **kwargs)
+
+ self.operation = operation
+ self.args = args
+ self.kwargs = kwargs
+
+ self.session = _FakeSession()
+ self.context = context.get_admin_context()
+ self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
+ 'name': 'myinstance'}
+
+ def test_create_vdi(self):
+ # Some images are registered with XenServer explicitly by calling
+ # `create_vdi`
+ vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
+ 'myvdi', 'root', 1024, read_only=True)
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, self.session.args[0]['other_config'])
+
+ def test_create_image(self):
+ # Other images are registered implicitly when they are dropped into
+ # the SR by a dom0 plugin or some other process
+ self.flags(cache_images='none', group='xenserver')
+
+ def fake_fetch_image(*args):
+ return {'root': {'uuid': 'fake-uuid'}}
+
+ self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
+
+ other_config = {}
+
+ def VDI_add_to_other_config(ref, key, value):
+ other_config[key] = value
+
+ # Stubbing on the session object and not class so we don't pollute
+ # other tests
+ self.session.VDI_add_to_other_config = VDI_add_to_other_config
+ self.session.VDI_get_other_config = lambda vdi: {}
+
+ vm_utils.create_image(self.context, self.session, self.fake_instance,
+ 'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, other_config)
+
+ def test_import_migrated_vhds(self):
+ # Migrated images should preserve the `other_config`
+ other_config = {}
+
+ def VDI_add_to_other_config(ref, key, value):
+ other_config[key] = value
+
+ def call_plugin_serialized(*args, **kwargs):
+ return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
+
+ # Stubbing on the session object and not class so we don't pollute
+ # other tests
+ self.session.VDI_add_to_other_config = VDI_add_to_other_config
+ self.session.VDI_get_other_config = lambda vdi: {}
+ self.session.call_plugin_serialized = call_plugin_serialized
+
+ self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
+ self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)
+
+ vm_utils._import_migrated_vhds(self.session, self.fake_instance,
+ "disk_label", "root", "vdi_label")
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, other_config)
+
+
+class GenerateDiskTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GenerateDiskTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+ self.session.is_local_connection = False
+ self.vm_ref = fake.create_vm("foo", "Running")
+
+ def tearDown(self):
+ super(GenerateDiskTestCase, self).tearDown()
+ fake.destroy_vm(self.vm_ref)
+
+ def _expect_parted_calls(self):
+ self.mox.StubOutWithMock(utils, "execute")
+ self.mox.StubOutWithMock(utils, "trycmd")
+ self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
+ self.mox.StubOutWithMock(vm_utils.os.path, "exists")
+ if self.session.is_local_connection:
+ utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
+ 'msdos', check_exit_code=False, run_as_root=True)
+ utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
+ 'primary', '0', '-0',
+ check_exit_code=False, run_as_root=True)
+ vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
+ utils.trycmd('kpartx', '-a', '/dev/fakedev',
+ discard_warnings=True, run_as_root=True)
+ else:
+ utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
+ 'msdos', check_exit_code=True, run_as_root=True)
+ utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
+ 'primary', '0', '-0',
+ check_exit_code=True, run_as_root=True)
+
+ def _check_vdi(self, vdi_ref, check_attached=True):
+ vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
+ self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
+ if check_attached:
+ vbd_ref = vdi_rec["VBDs"][0]
+ vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
+ self.assertEqual(self.vm_ref, vbd_rec['VM'])
+ else:
+ self.assertEqual(0, len(vdi_rec["VBDs"]))
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_with_no_fs_given(self):
+ self._expect_parted_calls()
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "user", 10, None)
+ self._check_vdi(vdi_ref)
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_swap(self):
+ self._expect_parted_calls()
+ utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "swap", 10, "linux-swap")
+ self._check_vdi(vdi_ref)
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_ephemeral(self):
+ self._expect_parted_calls()
+ utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
+ run_as_root=True)
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
+ self._check_vdi(vdi_ref)
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_ensure_cleanup_called(self):
+ self._expect_parted_calls()
+ utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
+ run_as_root=True).AndRaise(test.TestingException)
+ vm_utils.destroy_vdi(self.session,
+ mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))
+
+ self.mox.ReplayAll()
+ self.assertRaises(test.TestingException, vm_utils._generate_disk,
+ self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_ephemeral_local_not_attached(self):
+ self.session.is_local_connection = True
+ self._expect_parted_calls()
+ utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
+ run_as_root=True)
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ None, "2", "name", "ephemeral", 10, "ext4")
+ self._check_vdi(vdi_ref, check_attached=False)
+
+
+class GenerateEphemeralTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GenerateEphemeralTestCase, self).setUp()
+ self.session = "session"
+ self.instance = "instance"
+ self.vm_ref = "vm_ref"
+ self.name_label = "name"
+ self.ephemeral_name_label = "name ephemeral"
+ self.userdevice = 4
+ self.mox.StubOutWithMock(vm_utils, "_generate_disk")
+ self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")
+
+ def test_get_ephemeral_disk_sizes_simple(self):
+ result = vm_utils.get_ephemeral_disk_sizes(20)
+ expected = [20]
+ self.assertEqual(expected, list(result))
+
+ def test_get_ephemeral_disk_sizes_three_disks_2000(self):
+ result = vm_utils.get_ephemeral_disk_sizes(4030)
+ expected = [2000, 2000, 30]
+ self.assertEqual(expected, list(result))
+
+ def test_get_ephemeral_disk_sizes_two_disks_1024(self):
+ result = vm_utils.get_ephemeral_disk_sizes(2048)
+ expected = [1024, 1024]
+ self.assertEqual(expected, list(result))
+
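+ # The three size tests above pin down a chunking rule; a sketch
+ # consistent with them (illustrative; the real helper may differ):
+ # totals that are an exact multiple of 1024 GB split into 1024 GB
+ # disks, anything else into 2000 GB disks plus a remainder.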
+ def _expect_generate_disk(self, size, device, name_label):
+ vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
+ str(device), name_label, 'ephemeral',
+ size * 1024, None).AndReturn(device)
+
+ def test_generate_ephemeral_adds_one_disk(self):
+ self._expect_generate_disk(20, self.userdevice,
+ self.ephemeral_name_label)
+ self.mox.ReplayAll()
+
+ vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
+ str(self.userdevice), self.name_label, 20)
+
+ def test_generate_ephemeral_adds_multiple_disks(self):
+ self._expect_generate_disk(2000, self.userdevice,
+ self.ephemeral_name_label)
+ self._expect_generate_disk(2000, self.userdevice + 1,
+ self.ephemeral_name_label + " (1)")
+ self._expect_generate_disk(30, self.userdevice + 2,
+ self.ephemeral_name_label + " (2)")
+ self.mox.ReplayAll()
+
+ vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
+ str(self.userdevice), self.name_label, 4030)
+
+ def test_generate_ephemeral_cleans_up_on_error(self):
+ self._expect_generate_disk(1024, self.userdevice,
+ self.ephemeral_name_label)
+ self._expect_generate_disk(1024, self.userdevice + 1,
+ self.ephemeral_name_label + " (1)")
+
+ vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
+ str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
+ units.Mi, None).AndRaise(exception.NovaException)
+
+ vm_utils.safe_destroy_vdis(self.session, [4, 5])
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
+ self.session, self.instance, self.vm_ref,
+ str(self.userdevice), self.name_label, 4096)
+
+
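+# FakeFile records seek() calls so the streaming tests can assert where the
+# image write starts: offset 0 for standalone kernel/ramdisk images, or just
+# past the MBR for whole-disk (AMI-style) images.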
+class FakeFile(object):
+ def __init__(self):
+ self._file_operations = []
+
+ def seek(self, offset):
+ self._file_operations.append((self.seek, offset))
+
+
+class StreamDiskTestCase(VMUtilsTestBase):
+ def setUp(self):
+ import __builtin__
+ super(StreamDiskTestCase, self).setUp()
+ self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
+ self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
+ self.mox.StubOutWithMock(vm_utils, '_write_partition')
+
+        # NOTE(matelakat): Stubbing the builtin open might hide the real
+        # failure reason, as test runners are unhappy with a mocked-out open.
+ self.mox.StubOutWithMock(__builtin__, 'open')
+ self.image_service_func = self.mox.CreateMockAnything()
+
+ def test_non_ami(self):
+ fake_file = FakeFile()
+
+ vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
+ vm_utils.utils.temporary_chown(
+ 'some_path').AndReturn(contextified(None))
+ open('some_path', 'wb').AndReturn(contextified(fake_file))
+ self.image_service_func(fake_file)
+
+ self.mox.ReplayAll()
+
+ vm_utils._stream_disk("session", self.image_service_func,
+ vm_utils.ImageType.KERNEL, None, 'dev')
+
+ self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)
+
+ def test_ami_disk(self):
+ fake_file = FakeFile()
+
+ vm_utils._write_partition("session", 100, 'dev')
+ vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
+ vm_utils.utils.temporary_chown(
+ 'some_path').AndReturn(contextified(None))
+ open('some_path', 'wb').AndReturn(contextified(fake_file))
+ self.image_service_func(fake_file)
+
+ self.mox.ReplayAll()
+
+ vm_utils._stream_disk("session", self.image_service_func,
+ vm_utils.ImageType.DISK, 100, 'dev')
+
+ self.assertEqual(
+ [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
+ fake_file._file_operations)
+
+
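+# get_sr_path should prefer an explicit 'path' in the PBD's device_config and
+# otherwise fall back to the conventional /var/run/sr-mount/<SR uuid> location.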
+class VMUtilsSRPath(VMUtilsTestBase):
+ def setUp(self):
+ super(VMUtilsSRPath, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+ self.session.is_local_connection = False
+
+ def test_defined(self):
+ self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
+ self.mox.StubOutWithMock(self.session, "call_xenapi")
+
+ vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
+ self.session.host_ref = "host_ref"
+ self.session.call_xenapi('PBD.get_all_records_where',
+ 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
+ {'pbd_ref': {'device_config': {'path': 'sr_path'}}})
+
+ self.mox.ReplayAll()
+ self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")
+
+ def test_default(self):
+ self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
+ self.mox.StubOutWithMock(self.session, "call_xenapi")
+
+ vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
+ self.session.host_ref = "host_ref"
+ self.session.call_xenapi('PBD.get_all_records_where',
+ 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
+ {'pbd_ref': {'device_config': {}}})
+ self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
+ {'uuid': 'sr_uuid', 'type': 'ext'})
+ self.mox.ReplayAll()
+ self.assertEqual(vm_utils.get_sr_path(self.session),
+ "/var/run/sr-mount/sr_uuid")
+
+
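+# create_kernel_and_ramdisk asks the dom0 'kernel' plugin for a cached copy
+# first; an empty string from the plugin means a cache miss, in which case the
+# image is fetched with _fetch_disk_image instead.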
+class CreateKernelRamdiskTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateKernelRamdiskTestCase, self).setUp()
+ self.context = "context"
+ self.session = FakeSession()
+ self.instance = {"kernel_id": None, "ramdisk_id": None}
+ self.name_label = "name"
+ self.mox.StubOutWithMock(self.session, "call_plugin")
+ self.mox.StubOutWithMock(uuid, "uuid4")
+ self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")
+
+ def test_create_kernel_and_ramdisk_no_create(self):
+ self.mox.ReplayAll()
+ result = vm_utils.create_kernel_and_ramdisk(self.context,
+ self.session, self.instance, self.name_label)
+ self.assertEqual((None, None), result)
+
+ def test_create_kernel_and_ramdisk_create_both_cached(self):
+ kernel_id = "kernel"
+ ramdisk_id = "ramdisk"
+ self.instance["kernel_id"] = kernel_id
+ self.instance["ramdisk_id"] = ramdisk_id
+
+ args_kernel = {}
+ args_kernel['cached-image'] = kernel_id
+ args_kernel['new-image-uuid'] = "fake_uuid1"
+ uuid.uuid4().AndReturn("fake_uuid1")
+ self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+ args_kernel).AndReturn("k")
+
+ args_ramdisk = {}
+ args_ramdisk['cached-image'] = ramdisk_id
+ args_ramdisk['new-image-uuid'] = "fake_uuid2"
+ uuid.uuid4().AndReturn("fake_uuid2")
+ self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+ args_ramdisk).AndReturn("r")
+
+ self.mox.ReplayAll()
+ result = vm_utils.create_kernel_and_ramdisk(self.context,
+ self.session, self.instance, self.name_label)
+ self.assertEqual(("k", "r"), result)
+
+ def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
+ kernel_id = "kernel"
+ self.instance["kernel_id"] = kernel_id
+
+ args_kernel = {}
+ args_kernel['cached-image'] = kernel_id
+ args_kernel['new-image-uuid'] = "fake_uuid1"
+ uuid.uuid4().AndReturn("fake_uuid1")
+ self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+ args_kernel).AndReturn("")
+
+ kernel = {"kernel": {"file": "k"}}
+ vm_utils._fetch_disk_image(self.context, self.session, self.instance,
+ self.name_label, kernel_id, 0).AndReturn(kernel)
+
+ self.mox.ReplayAll()
+ result = vm_utils.create_kernel_and_ramdisk(self.context,
+ self.session, self.instance, self.name_label)
+ self.assertEqual(("k", None), result)
+
+
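+# _scan_sr retries only on the known XenAPI failure SR_BACKEND_FAILURE_40,
+# sleeping with exponential backoff (2, 4, 8 seconds) and giving up after four
+# attempts; any other exception propagates immediately.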
+class ScanSrTestCase(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, "_scan_sr")
+ @mock.patch.object(vm_utils, "safe_find_sr")
+ def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
+ mock_safe_find_sr.return_value = "sr_ref"
+
+ self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
+
+ mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
+
+ def test_scan_sr_works(self):
+ session = mock.Mock()
+ vm_utils._scan_sr(session, "sr_ref")
+ session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
+
+ def test_scan_sr_unknown_error_fails_once(self):
+ session = mock.Mock()
+ session.call_xenapi.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ vm_utils._scan_sr, session, "sr_ref")
+ session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
+ session = mock.Mock()
+
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+
+ self.assertRaises(FakeException,
+ vm_utils._scan_sr, session, "sr_ref")
+
+ session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
+ self.assertEqual(4, session.call_xenapi.call_count)
+ mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
+ session = mock.Mock()
+
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session.XenAPI.Failure = FakeException
+
+ def fake_call_xenapi(*args):
+ fake_call_xenapi.count += 1
+ if fake_call_xenapi.count != 2:
+ raise FakeException()
+
+ fake_call_xenapi.count = 0
+ session.call_xenapi.side_effect = fake_call_xenapi
+
+ vm_utils._scan_sr(session, "sr_ref")
+
+ session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
+ self.assertEqual(2, session.call_xenapi.call_count)
+ mock_sleep.assert_called_once_with(2)
+
+
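+# create_vm should honour vcpu_pin_set via the VCPUs_params mask, disable the
+# VSS provider through xenstore for Windows guests, and reject an unparseable
+# pin set up front.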
+@mock.patch.object(flavors, 'extract_flavor',
+ return_value={
+ 'memory_mb': 1024,
+ 'vcpus': 1,
+ 'vcpu_weight': 1.0,
+ })
+class CreateVmTestCase(VMUtilsTestBase):
+ def test_vss_provider(self, mock_extract):
+ self.flags(vcpu_pin_set="2,3")
+ session = _get_fake_session()
+ instance = {
+ "uuid": "uuid", "os_type": "windows"
+ }
+
+ vm_utils.create_vm(session, instance, "label",
+ "kernel", "ramdisk")
+
+ vm_rec = {
+ 'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1.0'},
+ 'PV_args': '',
+ 'memory_static_min': '0',
+ 'ha_restart_priority': '',
+ 'HVM_boot_policy': 'BIOS order',
+ 'PV_bootloader': '', 'tags': [],
+ 'VCPUs_max': '1',
+ 'memory_static_max': '1073741824',
+ 'actions_after_shutdown': 'destroy',
+ 'memory_dynamic_max': '1073741824',
+ 'user_version': '0',
+ 'xenstore_data': {'vm-data/allowvssprovider': 'false'},
+ 'blocked_operations': {},
+ 'is_a_template': False,
+ 'name_description': '',
+ 'memory_dynamic_min': '1073741824',
+ 'actions_after_crash': 'destroy',
+ 'memory_target': '1073741824',
+ 'PV_ramdisk': '',
+ 'PV_bootloader_args': '',
+ 'PCI_bus': '',
+ 'other_config': {'nova_uuid': 'uuid'},
+ 'name_label': 'label',
+ 'actions_after_reboot': 'restart',
+ 'VCPUs_at_startup': '1',
+ 'HVM_boot_params': {'order': 'dc'},
+ 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
+ 'timeoffset': '0', 'viridian': 'true',
+ 'acpi': 'true'},
+ 'PV_legacy_args': '',
+ 'PV_kernel': '',
+ 'affinity': '',
+ 'recommendations': '',
+ 'ha_always_run': False
+ }
+ session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
+
+ def test_invalid_cpu_mask_raises(self, mock_extract):
+ self.flags(vcpu_pin_set="asdf")
+ session = mock.Mock()
+ instance = {
+ "uuid": "uuid",
+ }
+ self.assertRaises(exception.Invalid,
+ vm_utils.create_vm,
+ session, instance, "label",
+ "kernel", "ramdisk")
+
+ def test_destroy_vm(self, mock_extract):
+ session = mock.Mock()
+ instance = {
+ "uuid": "uuid",
+ }
+
+ vm_utils.destroy_vm(session, instance, "vm_ref")
+
+ session.VM.destroy.assert_called_once_with("vm_ref")
+
+ def test_destroy_vm_silently_fails(self, mock_extract):
+ session = mock.Mock()
+ exc = test.TestingException()
+ session.XenAPI.Failure = test.TestingException
+ session.VM.destroy.side_effect = exc
+ instance = {
+ "uuid": "uuid",
+ }
+
+ vm_utils.destroy_vm(session, instance, "vm_ref")
+
+ session.VM.destroy.assert_called_once_with("vm_ref")
+
+
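+# determine_vm_mode: an explicit vm_mode on the instance wins; otherwise Linux
+# guests and DISK/DISK_VHD images default to xen (PV) mode, while Windows and
+# unknown os types default to HVM.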
+class DetermineVmModeTestCase(VMUtilsTestBase):
+ def test_determine_vm_mode_returns_xen_mode(self):
+ instance = {"vm_mode": "xen"}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_hvm_mode(self):
+ instance = {"vm_mode": "hvm"}
+ self.assertEqual(vm_mode.HVM,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_xen_for_linux(self):
+ instance = {"vm_mode": None, "os_type": "linux"}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_hvm_for_windows(self):
+ instance = {"vm_mode": None, "os_type": "windows"}
+ self.assertEqual(vm_mode.HVM,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_hvm_by_default(self):
+ instance = {"vm_mode": None, "os_type": None}
+ self.assertEqual(vm_mode.HVM,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_xen_for_VHD(self):
+ instance = {"vm_mode": None, "os_type": None}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
+
+ def test_determine_vm_mode_returns_xen_for_DISK(self):
+ instance = {"vm_mode": None, "os_type": None}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
+
+
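+# Thin call_xenapi wrappers (_vm_get_vbd_refs, _vbd_get_rec, ...) mostly exist
+# so callers can be mocked per-operation. update_vdi_virtual_size takes the
+# new size in GB and only ever grows a VDI: an already large enough disk is
+# left alone, and one bigger than the requested size raises ResizeError.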
+class CallXenAPIHelpersTestCase(VMUtilsTestBase):
+ def test_vm_get_vbd_refs(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
+ session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
+
+ def test_vbd_get_rec(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
+ session.call_xenapi.assert_called_once_with("VBD.get_record",
+ "vbd_ref")
+
+ def test_vdi_get_rec(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
+ session.call_xenapi.assert_called_once_with("VDI.get_record",
+ "vdi_ref")
+
+ def test_vdi_snapshot(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
+ session.call_xenapi.assert_called_once_with("VDI.snapshot",
+ "vdi_ref", {})
+
+ def test_vdi_get_virtual_size(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "123"
+ self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
+ session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
+ "ref")
+
+ @mock.patch.object(vm_utils, '_get_resize_func_name')
+ def test_vdi_resize(self, mock_get_resize_func_name):
+ session = mock.Mock()
+ mock_get_resize_func_name.return_value = "VDI.fake"
+ vm_utils._vdi_resize(session, "ref", 123)
+ session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
+
+ @mock.patch.object(vm_utils, '_vdi_resize')
+ @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
+ def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
+ mock_get_size.return_value = (1024 ** 3) - 1
+ instance = {"uuid": "a"}
+
+ vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
+
+ mock_get_size.assert_called_once_with("s", "ref")
+ mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
+
+ @mock.patch.object(vm_utils, '_vdi_resize')
+ @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
+ def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
+ mock_resize):
+ mock_get_size.return_value = 1024 ** 3
+ instance = {"uuid": "a"}
+
+ vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
+
+ mock_get_size.assert_called_once_with("s", "ref")
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, '_vdi_resize')
+ @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
+ def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
+ mock_resize):
+ mock_get_size.return_value = 1024 ** 3 + 1
+ instance = {"uuid": "a"}
+
+ self.assertRaises(exception.ResizeError,
+ vm_utils.update_vdi_virtual_size,
+ "s", instance, "ref", 1)
+
+ mock_get_size.assert_called_once_with("s", "ref")
+ self.assertFalse(mock_resize.called)
+
+
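+# get_vdi_for_vm_safely walks the VM's VBDs looking for the requested
+# userdevice (the root disk, '0', by default) and raises rather than guessing
+# when no VBD matches.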
+@mock.patch.object(vm_utils, '_vdi_get_rec')
+@mock.patch.object(vm_utils, '_vbd_get_rec')
+@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
+class GetVdiForVMTestCase(VMUtilsTestBase):
+ def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_rec):
+ session = "session"
+
+ vm_get_vbd_refs.return_value = ["a", "b"]
+ vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
+ vdi_get_rec.return_value = {}
+
+ result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
+ self.assertEqual(('vdi_ref', {}), result)
+
+ vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
+ vbd_get_rec.assert_called_once_with(session, "a")
+ vdi_get_rec.assert_called_once_with(session, "vdi_ref")
+
+ def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_rec):
+ session = "session"
+
+ vm_get_vbd_refs.return_value = ["a", "b"]
+ vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
+
+ self.assertRaises(exception.NovaException,
+ vm_utils.get_vdi_for_vm_safely,
+ session, "vm_ref", userdevice='1')
+
+ self.assertEqual([], vdi_get_rec.call_args_list)
+ self.assertEqual(2, len(vbd_get_rec.call_args_list))
+
+
+@mock.patch.object(vm_utils, '_vdi_get_uuid')
+@mock.patch.object(vm_utils, '_vbd_get_rec')
+@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
+class GetAllVdiForVMTestCase(VMUtilsTestBase):
+ def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid):
+ def fake_vbd_get_rec(session, vbd_ref):
+ return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
+
+ def fake_vdi_get_uuid(session, vdi_ref):
+ return vdi_ref
+
+ vm_get_vbd_refs.return_value = ["0", "2"]
+ vbd_get_rec.side_effect = fake_vbd_get_rec
+ vdi_get_uuid.side_effect = fake_vdi_get_uuid
+
+ def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid):
+ self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid)
+
+ result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
+ expected = ['vdi_ref_0', 'vdi_ref_2']
+ self.assertEqual(expected, list(result))
+
+ def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid):
+ self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid)
+
+ result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
+ min_userdevice=1)
+ expected = ["vdi_ref_2"]
+ self.assertEqual(expected, list(result))
+
+
+class GetAllVdisTestCase(VMUtilsTestBase):
+ def test_get_all_vdis_in_sr(self):
+
+ def fake_get_rec(record_type, ref):
+ if ref == "2":
+ return "vdi_rec_2"
+
+ session = mock.Mock()
+ session.call_xenapi.return_value = ["1", "2"]
+ session.get_rec.side_effect = fake_get_rec
+
+ sr_ref = "sr_ref"
+ actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
+ self.assertEqual(actual, [('2', 'vdi_rec_2')])
+
+ session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
+
+
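+# Leaving the vdi_attached_here context should run a root 'sync' so writes to
+# the temporary device are flushed.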
+class VDIAttachedHere(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, 'destroy_vbd')
+ @mock.patch.object(vm_utils, '_get_this_vm_ref')
+ @mock.patch.object(vm_utils, 'create_vbd')
+ @mock.patch.object(vm_utils, '_remap_vbd_dev')
+ @mock.patch.object(vm_utils, '_wait_for_device')
+ @mock.patch.object(utils, 'execute')
+ def test_sync_called(self, mock_execute, mock_wait_for_device,
+ mock_remap_vbd_dev, mock_create_vbd,
+ mock_get_this_vm_ref, mock_destroy_vbd):
+ session = _get_fake_session()
+ with vm_utils.vdi_attached_here(session, 'vdi_ref'):
+ pass
+ mock_execute.assert_called_with('sync', run_as_root=True)
+
+
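+# snapshot_attached_here is a context manager: it snapshots the requested
+# userdevice, waits for VHD coalescing, yields the snapshot chain, and always
+# cleans up the snapshot (and any stale chain snapshots) on exit, even when
+# the body raises.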
+class SnapshotAttachedHereTestCase(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
+ def test_snapshot_attached_here(self, mock_impl):
+ def fake_impl(session, instance, vm_ref, label, userdevice,
+ post_snapshot_callback):
+ self.assertEqual("session", session)
+ self.assertEqual("instance", instance)
+ self.assertEqual("vm_ref", vm_ref)
+ self.assertEqual("label", label)
+ self.assertEqual('0', userdevice)
+ self.assertIsNone(post_snapshot_callback)
+ yield "fake"
+
+ mock_impl.side_effect = fake_impl
+
+ with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
+ "label") as result:
+ self.assertEqual("fake", result)
+
+ mock_impl.assert_called_once_with("session", "instance", "vm_ref",
+ "label", '0', None)
+
+ @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
+ @mock.patch.object(vm_utils, 'safe_destroy_vdis')
+ @mock.patch.object(vm_utils, '_walk_vdi_chain')
+ @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
+ @mock.patch.object(vm_utils, '_vdi_get_uuid')
+ @mock.patch.object(vm_utils, '_vdi_snapshot')
+ @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+ def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
+ mock_vdi_snapshot, mock_vdi_get_uuid,
+ mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
+ mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
+ session = "session"
+ instance = {"uuid": "uuid"}
+ mock_callback = mock.Mock()
+
+ mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
+ {"SR": "sr_ref",
+ "uuid": "vdi_uuid"})
+ mock_vdi_snapshot.return_value = "snap_ref"
+ mock_vdi_get_uuid.return_value = "snap_uuid"
+ mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
+
+ try:
+ with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
+ "label", '2', mock_callback) as result:
+ self.assertEqual(["a", "b"], result)
+ raise test.TestingException()
+ self.assertTrue(False)
+ except test.TestingException:
+ pass
+
+ mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
+ '2')
+ mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
+ mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
+ "sr_ref", "vdi_ref", ['a', 'b'])
+ mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
+ mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
+ mock.call(session, "snap_uuid")])
+ mock_callback.assert_called_once_with(
+ task_state="image_pending_upload")
+ mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
+ mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
+ instance, ['a', 'b'], "sr_ref")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
+ instance = {"uuid": "fake"}
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid"])
+ self.assertFalse(mock_sleep.called)
+
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
+ mock_count):
+ mock_count.return_value = 2
+ instance = {"uuid": "fake"}
+
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertFalse(mock_sleep.called)
+ self.assertTrue(mock_count.called)
+
+ @mock.patch.object(greenthread, 'sleep')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(vm_utils, '_scan_sr')
+ def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
+ mock_count, mock_get_vhd_parent_uuid, mock_sleep):
+ mock_count.return_value = 1
+ instance = {"uuid": "fake"}
+
+ self.assertRaises(exception.NovaException,
+ vm_utils._wait_for_vhd_coalesce, "session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertTrue(mock_count.called)
+ self.assertEqual(20, mock_sleep.call_count)
+ self.assertEqual(20, mock_scan_sr.call_count)
+
+ @mock.patch.object(greenthread, 'sleep')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(vm_utils, '_scan_sr')
+ def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
+ mock_count, mock_get_vhd_parent_uuid, mock_sleep):
+ mock_count.return_value = 1
+ instance = {"uuid": "fake"}
+ mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
+
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertEqual(1, mock_sleep.call_count)
+ self.assertEqual(2, mock_scan_sr.call_count)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_count_children(self, mock_get_all_vdis_in_sr):
+ vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
+ ('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
+ ('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
+ mock_get_all_vdis_in_sr.return_value = vdis
+ self.assertEqual(2, vm_utils._count_children('session',
+ 'parent1', 'sr'))
+
+
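+# Migrated disks arrive as VHD chains labelled "<uuid>" for the root disk and
+# "<uuid>_ephemeral_<n>" for ephemerals; _import_migrated_vhds moves them into
+# the default SR via the migration plugin's move_vhds_into_sr and rescans it.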
+class ImportMigratedDisksTestCase(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
+ @mock.patch.object(vm_utils, '_import_migrated_root_disk')
+ def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
+ session = "session"
+ instance = "instance"
+ mock_root.return_value = "root_vdi"
+ mock_ephemeral.return_value = ["a", "b"]
+
+ result = vm_utils.import_all_migrated_disks(session, instance)
+
+ expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
+ self.assertEqual(expected, result)
+ mock_root.assert_called_once_with(session, instance)
+ mock_ephemeral.assert_called_once_with(session, instance)
+
+ @mock.patch.object(vm_utils, '_import_migrated_vhds')
+ def test_import_migrated_root_disk(self, mock_migrate):
+ mock_migrate.return_value = "foo"
+ instance = {"uuid": "uuid", "name": "name"}
+
+ result = vm_utils._import_migrated_root_disk("s", instance)
+
+ self.assertEqual("foo", result)
+ mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
+ "name")
+
+ @mock.patch.object(vm_utils, '_import_migrated_vhds')
+ def test_import_migrate_ephemeral_disks(self, mock_migrate):
+ mock_migrate.return_value = "foo"
+ instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}
+
+ result = vm_utils._import_migrate_ephemeral_disks("s", instance)
+
+ self.assertEqual({'4': 'foo', '5': 'foo'}, result)
+ expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
+ "ephemeral", "name ephemeral (1)"),
+ mock.call("s", instance, "uuid_ephemeral_2",
+ "ephemeral", "name ephemeral (2)")]
+ self.assertEqual(expected_calls, mock_migrate.call_args_list)
+
+ @mock.patch.object(vm_utils, '_set_vdi_info')
+ @mock.patch.object(vm_utils, 'scan_default_sr')
+ @mock.patch.object(vm_utils, 'get_sr_path')
+ def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
+ mock_set_info):
+ session = mock.Mock()
+ instance = {"uuid": "uuid"}
+ session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
+ session.call_xenapi.return_value = "vdi_ref"
+ mock_get_sr_path.return_value = "sr_path"
+
+ result = vm_utils._import_migrated_vhds(session, instance,
+ 'chain_label', 'disk_type', 'vdi_label')
+
+ expected = {'uuid': "a", 'ref': "vdi_ref"}
+ self.assertEqual(expected, result)
+ mock_get_sr_path.assert_called_once_with(session)
+ session.call_plugin_serialized.assert_called_once_with('migration',
+ 'move_vhds_into_sr', instance_uuid='chain_label',
+ sr_path='sr_path', uuid_stack=mock.ANY)
+ mock_scan_sr.assert_called_once_with(session)
+ session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
+ mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
+ 'vdi_label', 'disk_type', instance)
+
+ def test_get_vhd_parent_uuid_rec_provided(self):
+ session = mock.Mock()
+ vdi_ref = 'vdi_ref'
+ vdi_rec = {'sm_config': {}}
+ self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
+ vdi_ref,
+ vdi_rec))
+ self.assertFalse(session.call_xenapi.called)
+
+
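+# migrate_vhd labels each transfer with the instance uuid, appending
+# "_ephemeral_<n>" for ephemeral disks, and converts XenAPI failures from the
+# migration plugin into MigrationError.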
+class MigrateVHDTestCase(VMUtilsTestBase):
+ def _assert_transfer_called(self, session, label):
+ session.call_plugin_serialized.assert_called_once_with(
+ 'migration', 'transfer_vhd', instance_uuid=label, host="dest",
+ vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
+
+ def test_migrate_vhd_root(self):
+ session = mock.Mock()
+ instance = {"uuid": "a"}
+
+ vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
+ "sr_path", 2)
+
+ self._assert_transfer_called(session, "a")
+
+ def test_migrate_vhd_ephemeral(self):
+ session = mock.Mock()
+ instance = {"uuid": "a"}
+
+ vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
+ "sr_path", 2, 2)
+
+ self._assert_transfer_called(session, "a_ephemeral_2")
+
+ def test_migrate_vhd_converts_exceptions(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.call_plugin_serialized.side_effect = test.TestingException()
+ instance = {"uuid": "a"}
+
+ self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
+ session, instance, "vdi_uuid", "dest", "sr_path", 2)
+ self._assert_transfer_called(session, "a")
+
+
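+# strip_base_mirror_from_vdis resolves the VM's VBDs to their VDIs and removes
+# the 'base_mirror' sm_config key from each; XenAPI failures while stripping
+# are deliberately swallowed.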
+class StripBaseMirrorTestCase(VMUtilsTestBase):
+ def test_strip_base_mirror_from_vdi_works(self):
+ session = mock.Mock()
+ vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
+ session.call_xenapi.assert_called_once_with(
+ "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
+
+ def test_strip_base_mirror_from_vdi_hides_error(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.call_xenapi.side_effect = test.TestingException()
+
+ vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
+
+ session.call_xenapi.assert_called_once_with(
+ "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
+
+ @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
+ def test_strip_base_mirror_from_vdis(self, mock_strip):
+ def call_xenapi(method, arg):
+ if method == "VM.get_VBDs":
+ return ['VBD_ref_1', 'VBD_ref_2']
+ if method == "VBD.get_VDI":
+ return 'VDI' + arg[3:]
+ return "Unexpected call_xenapi: %s.%s" % (method, arg)
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = call_xenapi
+
+ vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
+
+ expected = [mock.call('VM.get_VBDs', "vm_ref"),
+ mock.call('VBD.get_VDI', "VBD_ref_1"),
+ mock.call('VBD.get_VDI', "VBD_ref_2")]
+ self.assertEqual(expected, session.call_xenapi.call_args_list)
+
+ expected = [mock.call(session, "VDI_ref_1"),
+ mock.call(session, "VDI_ref_2")]
+ self.assertEqual(expected, mock_strip.call_args_list)
+
+
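+# A xenapi_device_id in the image metadata is only honoured when the
+# hypervisor is new enough (6.2 passes here, 6.0 raises), and an unparseable
+# product version raises too, quoting the version in the message.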
+class DeviceIdTestCase(VMUtilsTestBase):
+ def test_device_id_is_none_if_not_specified_in_meta_data(self):
+ image_meta = {}
+ session = mock.Mock()
+ session.product_version = (6, 1, 0)
+ self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
+
+ def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
+ image_meta = {'xenapi_device_id': '0002'}
+ session = mock.Mock()
+ session.product_version = (6, 2, 0)
+ self.assertEqual('0002',
+ vm_utils.get_vm_device_id(session, image_meta))
+ session.product_version = (6, 3, 1)
+ self.assertEqual('0002',
+ vm_utils.get_vm_device_id(session, image_meta))
+
+ def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
+ image_meta = {'xenapi_device_id': '0002'}
+ session = mock.Mock()
+ session.product_version = (6, 0)
+ exc = self.assertRaises(exception.NovaException,
+ vm_utils.get_vm_device_id, session, image_meta)
+ self.assertEqual("Device id 0002 specified is not supported by "
+ "hypervisor version (6, 0)", exc.message)
+        session.product_version = '6a'
+ exc = self.assertRaises(exception.NovaException,
+ vm_utils.get_vm_device_id, session, image_meta)
+ self.assertEqual("Device id 0002 specified is not supported by "
+ "hypervisor version 6a", exc.message)
+
+
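+# The generated VM record should flip platform/viridian on os_type (Windows
+# guests get the Viridian enlightenments) and carry the image's device_id
+# through into the platform section.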
+class CreateVmRecordTestCase(VMUtilsTestBase):
+ @mock.patch.object(flavors, 'extract_flavor')
+ def test_create_vm_record_linux(self, mock_extract_flavor):
+ instance = {"uuid": "uuid123", "os_type": "linux"}
+ self._test_create_vm_record(mock_extract_flavor, instance, False)
+
+ @mock.patch.object(flavors, 'extract_flavor')
+ def test_create_vm_record_windows(self, mock_extract_flavor):
+ instance = {"uuid": "uuid123", "os_type": "windows"}
+ self._test_create_vm_record(mock_extract_flavor, instance, True)
+
+ def _test_create_vm_record(self, mock_extract_flavor, instance,
+ is_viridian):
+ session = _get_fake_session()
+ flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
+ mock_extract_flavor.return_value = flavor
+
+ vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
+ device_id="0002")
+
+ is_viridian_str = str(is_viridian).lower()
+
+ expected_vm_rec = {
+ 'VCPUs_params': {'cap': '0', 'weight': '2'},
+ 'PV_args': '',
+ 'memory_static_min': '0',
+ 'ha_restart_priority': '',
+ 'HVM_boot_policy': 'BIOS order',
+ 'PV_bootloader': '',
+ 'tags': [],
+ 'VCPUs_max': '1',
+ 'memory_static_max': '1073741824',
+ 'actions_after_shutdown': 'destroy',
+ 'memory_dynamic_max': '1073741824',
+ 'user_version': '0',
+ 'xenstore_data': {'vm-data/allowvssprovider': 'false'},
+ 'blocked_operations': {},
+ 'is_a_template': False,
+ 'name_description': '',
+ 'memory_dynamic_min': '1073741824',
+ 'actions_after_crash': 'destroy',
+ 'memory_target': '1073741824',
+ 'PV_ramdisk': '',
+ 'PV_bootloader_args': '',
+ 'PCI_bus': '',
+ 'other_config': {'nova_uuid': 'uuid123'},
+ 'name_label': 'name',
+ 'actions_after_reboot': 'restart',
+ 'VCPUs_at_startup': '1',
+ 'HVM_boot_params': {'order': 'dc'},
+ 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
+ 'timeoffset': '0', 'viridian': is_viridian_str,
+ 'acpi': 'true', 'device_id': '0002'},
+ 'PV_legacy_args': '',
+ 'PV_kernel': '',
+ 'affinity': '',
+ 'recommendations': '',
+ 'ha_always_run': False}
+
+ session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
+
+ def test_list_vms(self):
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ fake.create_vm("foo1", "Halted")
+ vm_ref = fake.create_vm("foo2", "Running")
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.list_vms(driver._session))
+
+        # There will be 3 VMs, but one is Dom0 and one is not running on
+        # this host
+ self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
+ self.assertEqual(len(result), 1)
+
+ result_keys = [key for (key, value) in result]
+
+ self.assertIn(vm_ref, result_keys)
+
+
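+# _child_vhds returns the uuids of VDIs whose sm_config 'vhd-parent' is in the
+# given chain; old_snapshots_only keeps just the snapshots, and
+# _is_vdi_a_snapshot excludes snapshots tagged with an image-id (cached base
+# images).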
+class ChildVHDsTestCase(test.NoDBTestCase):
+ all_vdis = [
+ ("my-vdi-ref",
+ {"uuid": "my-uuid", "sm_config": {},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("non-parent",
+ {"uuid": "uuid-1", "sm_config": {},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("diff-parent",
+ {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("child",
+ {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("child-snap",
+ {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
+ "is_a_snapshot": True, "other_config": {}}),
+ ]
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_defaults(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
+
+ self.assertEqual(['uuid-child', 'uuid-child-snap'], result)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_only_snapshots(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
+ old_snapshots_only=True)
+
+ self.assertEqual(['uuid-child-snap'], result)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_chain(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref",
+ ["my-uuid", "other-uuid"], old_snapshots_only=True)
+
+ self.assertEqual(['uuid-child-snap'], result)
+
+ def test_is_vdi_a_snapshot_works(self):
+ vdi_rec = {"is_a_snapshot": True,
+ "other_config": {}}
+
+ self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+ def test_is_vdi_a_snapshot_base_images_false(self):
+ vdi_rec = {"is_a_snapshot": True,
+ "other_config": {"image-id": "fake"}}
+
+ self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+ def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
+ vdi_rec = {"is_a_snapshot": False,
+ "other_config": {}}
+
+ self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+
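+# remove_old_snapshots walks the root VDI's chain and has
+# _delete_snapshots_in_vdi_chain destroy stale snapshot children of the
+# chain's parent VDIs (the chain minus its leaf), rescanning the SR
+# afterwards; a single-entry chain is a no-op.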
+class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+ @mock.patch.object(vm_utils, '_walk_vdi_chain')
+ @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
+ def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
+ instance = {"uuid": "fake"}
+ mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
+ mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
+
+ vm_utils.remove_old_snapshots("session", instance, "vm_ref")
+
+ mock_delete.assert_called_once_with("session", instance,
+ ["uuid1", "uuid2"], "sr_ref")
+ mock_get.assert_called_once_with("session", "vm_ref")
+ mock_walk.assert_called_once_with("session", "vdi")
+
+ @mock.patch.object(vm_utils, '_child_vhds')
+ def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
+ instance = {"uuid": "fake"}
+
+ vm_utils._delete_snapshots_in_vdi_chain("session", instance,
+ ["uuid"], "sr")
+
+ self.assertFalse(mock_child.called)
+
+ @mock.patch.object(vm_utils, '_child_vhds')
+ def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
+ instance = {"uuid": "fake"}
+ mock_child.return_value = []
+
+ vm_utils._delete_snapshots_in_vdi_chain("session", instance,
+ ["uuid1", "uuid2"], "sr")
+
+ mock_child.assert_called_once_with("session", "sr", ["uuid2"],
+ old_snapshots_only=True)
+
+ @mock.patch.object(vm_utils, '_scan_sr')
+ @mock.patch.object(vm_utils, 'safe_destroy_vdis')
+ @mock.patch.object(vm_utils, '_child_vhds')
+ def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
+ mock_destroy, mock_scan):
+ instance = {"uuid": "fake"}
+ mock_child.return_value = ["suuid1", "suuid2"]
+ session = mock.Mock()
+ session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
+
+ vm_utils._delete_snapshots_in_vdi_chain(session, instance,
+ ["uuid1", "uuid2"], "sr")
+
+ mock_child.assert_called_once_with(session, "sr", ["uuid2"],
+ old_snapshots_only=True)
+ session.VDI.get_by_uuid.assert_has_calls([
+ mock.call("suuid1"), mock.call("suuid2")])
+ mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
+ mock_scan.assert_called_once_with(session, "sr")
+
+
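+# Old XenServer (5.5) and XCP (1.1) need VDI.resize_online; newer releases,
+# unknown brands and unparseable versions all default to plain VDI.resize.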
+class ResizeFunctionTestCase(test.NoDBTestCase):
+ def _call_get_resize_func_name(self, brand, version):
+ session = mock.Mock()
+ session.product_brand = brand
+ session.product_version = version
+
+ return vm_utils._get_resize_func_name(session)
+
+ def _test_is_resize(self, brand, version):
+ result = self._call_get_resize_func_name(brand, version)
+ self.assertEqual("VDI.resize", result)
+
+ def _test_is_resize_online(self, brand, version):
+ result = self._call_get_resize_func_name(brand, version)
+ self.assertEqual("VDI.resize_online", result)
+
+ def test_xenserver_5_5(self):
+ self._test_is_resize_online("XenServer", (5, 5, 0))
+
+ def test_xenserver_6_0(self):
+ self._test_is_resize("XenServer", (6, 0, 0))
+
+ def test_xcp_1_1(self):
+ self._test_is_resize_online("XCP", (1, 1, 0))
+
+ def test_xcp_1_2(self):
+ self._test_is_resize("XCP", (1, 2, 0))
+
+ def test_xcp_2_0(self):
+ self._test_is_resize("XCP", (2, 0, 0))
+
+ def test_random_brand(self):
+ self._test_is_resize("asfd", (1, 1, 0))
+
+ def test_default(self):
+ self._test_is_resize(None, None)
+
+ def test_empty(self):
+ self._test_is_resize("", "")
+
+ def test_bad_version(self):
+ self._test_is_resize("XenServer", "asdf")
+
+
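+# get_power_state maps the XenAPI power_state string onto nova's power_state
+# constants; an unrecognised state surfaces as a KeyError.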
+class VMInfoTests(VMUtilsTestBase):
+ def setUp(self):
+ super(VMInfoTests, self).setUp()
+ self.session = mock.Mock()
+
+ def test_get_power_state_valid(self):
+ # Save on test setup calls by having these simple tests in one method
+ self.session.call_xenapi.return_value = "Running"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.RUNNING)
+
+ self.session.call_xenapi.return_value = "Halted"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.SHUTDOWN)
+
+ self.session.call_xenapi.return_value = "Paused"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.PAUSED)
+
+ self.session.call_xenapi.return_value = "Suspended"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.SUSPENDED)
+
+ self.session.call_xenapi.return_value = "Crashed"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.CRASHED)
+
+ def test_get_power_state_invalid(self):
+ self.session.call_xenapi.return_value = "Invalid"
+ self.assertRaises(KeyError,
+ vm_utils.get_power_state, self.session, "ref")
+
+ _XAPI_record = {'power_state': 'Running',
+ 'memory_static_max': str(10 << 10),
+ 'memory_dynamic_max': str(9 << 10),
+ 'VCPUs_max': '5'}
+
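+    # compile_info presumably right-shifts the byte counts from the VM record
+    # by 10 bits, turning the str(10 << 10) / str(9 << 10) fields above into
+    # the 10L / 9L KiB figures expected below.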
+ def test_compile_info(self):
+
+ def call_xenapi(method, *args):
+ if method.startswith('VM.get_') and args[0] == 'dummy':
+ return self._XAPI_record[method[7:]]
+
+ self.session.call_xenapi.side_effect = call_xenapi
+
+ expected = {'state': power_state.RUNNING,
+ 'max_mem': 10L,
+ 'mem': 9L,
+ 'num_cpu': '5',
+ 'cpu_time': 0}
+
+ self.assertEqual(vm_utils.compile_info(self.session, "dummy"),
+ expected)
diff --git a/nova/tests/unit/virt/xenapi/test_vmops.py b/nova/tests/unit/virt/xenapi/test_vmops.py
new file mode 100644
index 0000000000..8140f997d2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vmops.py
@@ -0,0 +1,1124 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import exception
+from nova import objects
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent as xenapi_agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
+class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VMOpsTestBase, self).setUp()
+ self._setup_mock_vmops()
+ self.vms = []
+
+ def _setup_mock_vmops(self, product_brand=None, product_version=None):
+ stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
+ self._session = xenapi_session.XenAPISession('test_url', 'root',
+ 'test_pass')
+ self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
+
+ def create_vm(self, name, state="Running"):
+ vm_ref = xenapi_fake.create_vm(name, state)
+ self.vms.append(vm_ref)
+ vm = xenapi_fake.get_record("VM", vm_ref)
+ return vm, vm_ref
+
+ def tearDown(self):
+ super(VMOpsTestBase, self).tearDown()
+ for vm in self.vms:
+ xenapi_fake.destroy_vm(vm)
+
+
+class VMOpsTestCase(VMOpsTestBase):
+ def setUp(self):
+ super(VMOpsTestCase, self).setUp()
+ self._setup_mock_vmops()
+
+ def _setup_mock_vmops(self, product_brand=None, product_version=None):
+ self._session = self._get_mock_session(product_brand, product_version)
+ self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
+
+ def _get_mock_session(self, product_brand, product_version):
+ class Mock(object):
+ pass
+
+ mock_session = Mock()
+ mock_session.product_brand = product_brand
+ mock_session.product_version = product_version
+ return mock_session
+
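+    # After a crash mid-resize the '-orig' backup VM and the new VM may each
+    # exist or not; finish_revert_migration has to cope with every
+    # combination, restoring the backup's name label and only starting the
+    # instance if it is actually shut down.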
+ def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
+ vm_shutdown=True):
+ instance = {'name': 'foo',
+ 'task_state': task_states.RESIZE_MIGRATING}
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ self.mox.StubOutWithMock(self._vmops, '_destroy')
+ self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
+ self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
+ self.mox.StubOutWithMock(self._vmops, '_start')
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+
+ vm_utils.lookup(self._session, 'foo-orig').AndReturn(
+ backup_made and 'foo' or None)
+ vm_utils.lookup(self._session, 'foo').AndReturn(
+ (not backup_made or new_made) and 'foo' or None)
+ if backup_made:
+ if new_made:
+ self._vmops._destroy(instance, 'foo')
+ vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
+ self._vmops._attach_mapped_block_devices(instance, [])
+
+ vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
+ if vm_shutdown:
+ self._vmops._start(instance, 'foo')
+
+ self.mox.ReplayAll()
+
+ self._vmops.finish_revert_migration(context, instance, [])
+
+ def test_finish_revert_migration_after_crash(self):
+ self._test_finish_revert_migration_after_crash(True, True)
+
+ def test_finish_revert_migration_after_crash_before_new(self):
+ self._test_finish_revert_migration_after_crash(True, False)
+
+ def test_finish_revert_migration_after_crash_before_backup(self):
+ self._test_finish_revert_migration_after_crash(False, False)
+
+ def test_xsm_sr_check_relaxed_cached(self):
+ self.make_plugin_call_count = 0
+
+ def fake_make_plugin_call(plugin, method, **args):
+ self.make_plugin_call_count = self.make_plugin_call_count + 1
+ return "true"
+
+ self.stubs.Set(self._vmops, "_make_plugin_call",
+ fake_make_plugin_call)
+
+ self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
+ self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
+
+ self.assertEqual(self.make_plugin_call_count, 1)
+
+ def test_get_vm_opaque_ref_raises_instance_not_found(self):
+ instance = {"name": "dummy"}
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InstanceNotFound,
+ self._vmops._get_vm_opaque_ref, instance)
+
+
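+# auto_disk_config is written to xenstore as the strings 'True'/'False', with
+# None treated as False.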
+class InjectAutoDiskConfigTestCase(VMOpsTestBase):
+ def test_inject_auto_disk_config_when_present(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ xenstore_data = vm['xenstore_data']
+ self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')
+
+ def test_inject_auto_disk_config_none_as_false(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ xenstore_data = vm['xenstore_data']
+ self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
+
+
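+# _get_dom_id resolves the instance name (preferring the '<name>-rescue' VM
+# when check_rescue is set) to a domain id, which get_console_output uses to
+# fetch the log (the fake session simply returns 'dom_id: <n>').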
+class GetConsoleOutputTestCase(VMOpsTestBase):
+ def test_get_console_output_works(self):
+ self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
+
+ instance = {"name": "dummy"}
+ self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
+ self.mox.ReplayAll()
+
+ self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
+
+ def test_get_console_output_throws_nova_exception(self):
+ self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
+
+ instance = {"name": "dummy"}
+        # dom_id=0 is used to trigger an exception in the fake XenAPI
+ self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.vmops.get_console_output, instance)
+
+ def test_get_dom_id_works(self):
+ instance = {"name": "dummy"}
+ vm, vm_ref = self.create_vm("dummy")
+ self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
+
+ def test_get_dom_id_works_with_rescue_vm(self):
+ instance = {"name": "dummy"}
+ vm, vm_ref = self.create_vm("dummy-rescue")
+ self.assertEqual(vm["domid"],
+ self.vmops._get_dom_id(instance, check_rescue=True))
+
+ def test_get_dom_id_raises_not_found(self):
+ instance = {"name": "dummy"}
+ self.create_vm("not-dummy")
+ self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
+
+ def test_get_dom_id_works_with_vmref(self):
+ vm, vm_ref = self.create_vm("dummy")
+ self.assertEqual(vm["domid"],
+ self.vmops._get_dom_id(vm_ref=vm_ref))
+
+
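+# _test_spawn records the full, ordered spawn workflow with mox: VDI fetch,
+# kernel/ramdisk creation, VM record, disk/VIF attach, agent configuration and
+# firewall setup, bumping the progress counter between stages. Passing
+# throw_exception makes the final progress update fail so the rollback path
+# (destroy the VM, kernel/ramdisk and VDIs, clean up volume-backed VDIs) can
+# be asserted.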
+class SpawnTestCase(VMOpsTestBase):
+ def _stub_out_common(self):
+ self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
+ self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
+ self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
+ self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
+ self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
+ self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
+ self.mox.StubOutWithMock(self.vmops._volumeops,
+ 'safe_cleanup_from_vdis')
+ self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
+ self.mox.StubOutWithMock(vm_utils,
+ 'create_kernel_and_ramdisk')
+ self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
+ self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
+ self.mox.StubOutWithMock(self.vmops, '_destroy')
+ self.mox.StubOutWithMock(self.vmops, '_attach_disks')
+ self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
+ self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
+ self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
+ self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
+ self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
+ self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
+ self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
+ self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
+ self.mox.StubOutWithMock(self.vmops, '_create_vifs')
+ self.mox.StubOutWithMock(self.vmops.firewall_driver,
+ 'setup_basic_filtering')
+ self.mox.StubOutWithMock(self.vmops.firewall_driver,
+ 'prepare_instance_filter')
+ self.mox.StubOutWithMock(self.vmops, '_start')
+ self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
+ self.mox.StubOutWithMock(self.vmops,
+ '_configure_new_instance_with_agent')
+ self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
+ self.mox.StubOutWithMock(self.vmops.firewall_driver,
+ 'apply_instance_filter')
+
+ def _test_spawn(self, name_label_param=None, block_device_info_param=None,
+ rescue=False, include_root_vdi=True, throw_exception=None,
+ attach_pci_dev=False):
+ self._stub_out_common()
+
+ instance = {"name": "dummy", "uuid": "fake_uuid"}
+ name_label = name_label_param
+ if name_label is None:
+ name_label = "dummy"
+ image_meta = {"id": "image_id"}
+ context = "context"
+ session = self.vmops._session
+ injected_files = "fake_files"
+ admin_password = "password"
+ network_info = "net_info"
+ steps = 10
+ if rescue:
+ steps += 1
+
+ block_device_info = block_device_info_param
+ if block_device_info and not block_device_info['root_device_name']:
+ block_device_info = dict(block_device_info_param)
+ block_device_info['root_device_name'] = \
+ self.vmops.default_root_dev
+
+ di_type = "di_type"
+ vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
+ step = 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
+ if include_root_vdi:
+ vdis["root"] = {"ref": "fake_ref"}
+ self.vmops._get_vdis_for_instance(context, instance,
+ name_label, "image_id", di_type,
+ block_device_info).AndReturn(vdis)
+ self.vmops._resize_up_vdis(instance, vdis)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ kernel_file = "kernel"
+ ramdisk_file = "ramdisk"
+ vm_utils.create_kernel_and_ramdisk(context, session,
+ instance, name_label).AndReturn((kernel_file, ramdisk_file))
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ vm_ref = "fake_vm_ref"
+ self.vmops._ensure_instance_name_unique(name_label)
+ self.vmops._ensure_enough_free_mem(instance)
+ self.vmops._create_vm_record(context, instance, name_label,
+ di_type, kernel_file,
+ ramdisk_file, image_meta).AndReturn(vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
+ network_info, rescue, admin_password, injected_files)
+ if attach_pci_dev:
+ fake_dev = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': '00:00.0',
+ 'vendor_id': '1234',
+ 'product_id': 'abcd',
+ 'dev_type': 'type-PCI',
+ 'status': 'available',
+ 'dev_id': 'devid',
+ 'label': 'label',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ }
+ pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
+ vm_utils.set_other_config_pci(self.vmops._session,
+ vm_ref,
+ "0/0000:00:00.0")
+ else:
+ pci_manager.get_instance_pci_devs(instance).AndReturn([])
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._inject_instance_metadata(instance, vm_ref)
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ self.vmops._inject_hostname(instance, vm_ref, rescue)
+ self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
+ network_info)
+ self.vmops.inject_network_info(instance, network_info, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._create_vifs(instance, vm_ref, network_info)
+ self.vmops.firewall_driver.setup_basic_filtering(instance,
+ network_info).AndRaise(NotImplementedError)
+ self.vmops.firewall_driver.prepare_instance_filter(instance,
+ network_info)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ if rescue:
+ self.vmops._attach_orig_disks(instance, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step,
+ steps)
+ self.vmops._start(instance, vm_ref)
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._configure_new_instance_with_agent(instance, vm_ref,
+ injected_files, admin_password)
+ self.vmops._remove_hostname(instance, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops.firewall_driver.apply_instance_filter(instance,
+ network_info)
+ step += 1
+ last_call = self.vmops._update_instance_progress(context, instance,
+ step, steps)
+ if throw_exception:
+ last_call.AndRaise(throw_exception)
+ self.vmops._destroy(instance, vm_ref, network_info=network_info)
+ vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
+ kernel_file, ramdisk_file)
+ vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
+ self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
+
+ self.mox.ReplayAll()
+ self.vmops.spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info,
+ block_device_info_param, name_label_param, rescue)
+
+ def test_spawn(self):
+ self._test_spawn()
+
+ def test_spawn_with_alternate_options(self):
+ self._test_spawn(include_root_vdi=False, rescue=True,
+ name_label_param="bob",
+ block_device_info_param={"root_device_name": ""})
+
+ def test_spawn_with_pci_available_on_the_host(self):
+ self._test_spawn(attach_pci_dev=True)
+
+ def test_spawn_performs_rollback_and_throws_exception(self):
+ self.assertRaises(test.TestingException, self._test_spawn,
+ throw_exception=test.TestingException())
+
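+    # finish_migration mirrors spawn, but sources its disks from
+    # import_all_migrated_disks rather than fetching images, and only resizes
+    # the VDIs up when resize_instance is set.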
+ def _test_finish_migration(self, power_on=True, resize_instance=True,
+ throw_exception=None):
+ self._stub_out_common()
+ self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
+ self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
+
+ context = "context"
+ migration = {}
+ name_label = "dummy"
+ instance = {"name": name_label, "uuid": "fake_uuid"}
+ disk_info = "disk_info"
+ network_info = "net_info"
+ image_meta = {"id": "image_id"}
+ block_device_info = "bdi"
+ session = self.vmops._session
+
+ self.vmops._ensure_instance_name_unique(name_label)
+ self.vmops._ensure_enough_free_mem(instance)
+
+ di_type = "di_type"
+ vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
+
+ root_vdi = {"ref": "fake_ref"}
+ ephemeral_vdi = {"ref": "fake_ref_e"}
+ vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
+ vm_utils.import_all_migrated_disks(self.vmops._session,
+ instance).AndReturn(vdis)
+
+ kernel_file = "kernel"
+ ramdisk_file = "ramdisk"
+ vm_utils.create_kernel_and_ramdisk(context, session,
+ instance, name_label).AndReturn((kernel_file, ramdisk_file))
+
+ vm_ref = "fake_vm_ref"
+ self.vmops._create_vm_record(context, instance, name_label,
+ di_type, kernel_file,
+ ramdisk_file, image_meta).AndReturn(vm_ref)
+
+ if resize_instance:
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
+ network_info, False, None, None)
+ self.vmops._attach_mapped_block_devices(instance, block_device_info)
+ pci_manager.get_instance_pci_devs(instance).AndReturn([])
+
+ self.vmops._inject_instance_metadata(instance, vm_ref)
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
+ network_info)
+ self.vmops.inject_network_info(instance, network_info, vm_ref)
+
+ self.vmops._create_vifs(instance, vm_ref, network_info)
+ self.vmops.firewall_driver.setup_basic_filtering(instance,
+ network_info).AndRaise(NotImplementedError)
+ self.vmops.firewall_driver.prepare_instance_filter(instance,
+ network_info)
+
+ if power_on:
+ self.vmops._start(instance, vm_ref)
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
+ self.vmops.firewall_driver.apply_instance_filter(instance,
+ network_info)
+
+ last_call = self.vmops._update_instance_progress(context, instance,
+ step=5, total_steps=5)
+ if throw_exception:
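+            # As in the spawn test, a late failure must roll back the
+            # migrated VM, its kernel/ramdisk and all imported VDIs.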
+ last_call.AndRaise(throw_exception)
+ self.vmops._destroy(instance, vm_ref, network_info=network_info)
+ vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
+ kernel_file, ramdisk_file)
+ vm_utils.safe_destroy_vdis(self.vmops._session,
+ ["fake_ref_e", "fake_ref"])
+
+ self.mox.ReplayAll()
+ self.vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance,
+ block_device_info, power_on)
+
+ def test_finish_migration(self):
+ self._test_finish_migration()
+
+ def test_finish_migration_no_power_on(self):
+ self._test_finish_migration(power_on=False, resize_instance=False)
+
+ def test_finish_migrate_performs_rollback_on_error(self):
+ self.assertRaises(test.TestingException, self._test_finish_migration,
+ power_on=False, resize_instance=False,
+ throw_exception=test.TestingException())
+
+ def test_remove_hostname(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ self.mox.StubOutWithMock(self._session, 'call_xenapi')
+ self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
+ "vm-data/hostname")
+
+ self.mox.ReplayAll()
+ self.vmops._remove_hostname(instance, vm_ref)
+ self.mox.VerifyAll()
+
+ def test_reset_network(self):
+ class mock_agent(object):
+ def __init__(self):
+ self.called = False
+
+ def resetnetwork(self):
+ self.called = True
+
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ agent = mock_agent()
+
+ self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
+ self.mox.StubOutWithMock(self.vmops, '_get_agent')
+ self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
+ self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
+
+ self.vmops.agent_enabled(instance).AndReturn(True)
+ self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
+ self.vmops._inject_hostname(instance, vm_ref, False)
+ self.vmops._remove_hostname(instance, vm_ref)
+ self.mox.ReplayAll()
+ self.vmops.reset_network(instance)
+ self.assertTrue(agent.called)
+ self.mox.VerifyAll()
+
+ def test_inject_hostname(self):
+ instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=False)
+
+ def test_inject_hostname_with_rescue_prefix(self):
+ instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
+ 'RESCUE-dummy')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
+ def test_inject_hostname_with_windows_name_truncation(self):
+ instance = {"hostname": "dummydummydummydummydummy",
+ "os_type": "windows", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
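+        # Windows limits hostnames to 15 characters (the NetBIOS name
+        # length), so the RESCUE-prefixed value is truncated to fit.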
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
+ 'RESCUE-dummydum')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
+ def test_wait_for_instance_to_start(self):
+ instance = {"uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(vm_utils, 'get_power_state')
+ self.mox.StubOutWithMock(greenthread, 'sleep')
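+        # The helper polls the power state, sleeping between checks,
+        # until the VM reports RUNNING.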
+ vm_utils.get_power_state(self._session, vm_ref).AndReturn(
+ power_state.SHUTDOWN)
+ greenthread.sleep(0.5)
+ vm_utils.get_power_state(self._session, vm_ref).AndReturn(
+ power_state.RUNNING)
+
+ self.mox.ReplayAll()
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
+ def test_attach_orig_disks(self):
+ instance = {"name": "dummy"}
+ vm_ref = "vm_ref"
+ vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+
+ vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
+ self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
+ vbd_refs)
+ vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
+ vmops.DEVICE_RESCUE, bootable=False)
+
+ self.mox.ReplayAll()
+ self.vmops._attach_orig_disks(instance, vm_ref)
+
+ def test_agent_update_setup(self):
+ # agent updates need to occur after networking is configured
+ instance = {'name': 'betelgeuse',
+ 'uuid': '1-2-3-4-5-6'}
+ vm_ref = 'vm_ref'
+ agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
+ self.vmops._virtapi, instance, vm_ref)
+
+ self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
+ self.mox.StubOutWithMock(self.vmops, '_get_agent')
+ self.mox.StubOutWithMock(agent, 'get_version')
+ self.mox.StubOutWithMock(agent, 'resetnetwork')
+ self.mox.StubOutWithMock(agent, 'update_if_needed')
+
+ xenapi_agent.should_use_agent(instance).AndReturn(True)
+ self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
+ agent.get_version().AndReturn('1.2.3')
+ agent.resetnetwork()
+ agent.update_if_needed('1.2.3')
+
+ self.mox.ReplayAll()
+ self.vmops._configure_new_instance_with_agent(instance, vm_ref,
+ None, None)
+
+
+class DestroyTestCase(VMOpsTestBase):
+ def setUp(self):
+ super(DestroyTestCase, self).setUp()
+ self.context = context.RequestContext(user_id=None, project_id=None)
+ self.instance = fake_instance.fake_instance_obj(self.context)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid')
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
+ lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': []})
+ self.assertEqual(0, find_sr_by_uuid.call_count)
+ self.assertEqual(0, forget_sr.call_count)
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
+ hard_shutdown_vm, lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': [{'connection_info':
+ {'data': {'volume_id': 'fake-uuid'}}}]})
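+        # The driver derives an SR uuid of 'FA15E-D15C-<volume_id>'
+        # ("false disc") for volumes, so that is the uuid probed here.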
+ find_sr_by_uuid.assert_called_once_with(self.vmops._session,
+ 'FA15E-D15C-fake-uuid')
+ self.assertEqual(0, forget_sr.call_count)
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_orphaned_volume(self, forget_sr, find_sr_by_uuid,
+ hard_shutdown_vm, lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': [{'connection_info':
+ {'data': {'volume_id': 'fake-uuid'}}}]})
+ find_sr_by_uuid.assert_called_once_with(self.vmops._session,
+ 'FA15E-D15C-fake-uuid')
+ forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
+@mock.patch.object(vm_utils, 'get_sr_path')
+@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
+@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
+@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
+class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
+ def test_migrate_disk_and_power_off_works_down(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
+ flavor = {"root_gb": 1, "ephemeral_gb": 0}
+
+ self.vmops.migrate_disk_and_power_off(None, instance, None,
+ flavor, None)
+
+ self.assertFalse(migrate_up.called)
+ self.assertTrue(migrate_down.called)
+
+ def test_migrate_disk_and_power_off_works_up(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
+ flavor = {"root_gb": 2, "ephemeral_gb": 2}
+
+ self.vmops.migrate_disk_and_power_off(None, instance, None,
+ flavor, None)
+
+ self.assertFalse(migrate_down.called)
+ self.assertTrue(migrate_up.called)
+
+ def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"ephemeral_gb": 2}
+ flavor = {"ephemeral_gb": 1}
+
+ self.assertRaises(exception.ResizeError,
+ self.vmops.migrate_disk_and_power_off,
+ None, instance, None, flavor, None)
+
+
+@mock.patch.object(vm_utils, 'migrate_vhd')
+@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
+@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
+class MigrateDiskResizingUpTestCase(VMOpsTestBase):
+ def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
+ userdevice, post_snapshot_callback):
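+        # Stands in for vm_utils._snapshot_attached_here_impl: yields a
+        # fake VHD chain (leaf first) - three deep for the root disk
+        # (userdevice '0'), two deep for each ephemeral disk.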
+ self.assertIsInstance(instance, dict)
+ if userdevice == '0':
+ self.assertEqual("vm_ref", vm_ref)
+ self.assertEqual("fake-snapshot", label)
+ yield ["leaf", "parent", "grandp"]
+ else:
+ leaf = userdevice + "-leaf"
+ parent = userdevice + "-parent"
+ yield [leaf, parent]
+
+ def test_migrate_disk_resizing_up_works_no_ephemeral(self,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_get_all_vdi_uuids.return_value = None
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.vmops._migrate_disk_resizing_up(context, instance, dest,
+ vm_ref, sr_path)
+
+ mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
+ vm_ref, min_userdevice=4)
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_shutdown.assert_called_once_with(instance, vm_ref)
+
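+        # The snapshotted (immutable) VHDs migrate while the VM is still
+        # running; the active leaf is only sent after the shutdown.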
+ m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
+ dest, sr_path, 1),
+ mock.call(self.vmops._session, instance, "grandp",
+ dest, sr_path, 2),
+ mock.call(self.vmops._session, instance, "leaf",
+ dest, sr_path, 0)]
+ self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+            # step 5/5 is executed by finish_migration.
+ ]
+ self.assertEqual(prog_expected, mock_update_progress.call_args_list)
+
+ def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.vmops._migrate_disk_resizing_up(context, instance, dest,
+ vm_ref, sr_path)
+
+ mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
+ vm_ref, min_userdevice=4)
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_shutdown.assert_called_once_with(instance, vm_ref)
+
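+        # migrate_vhd's trailing arguments are the sequence number and,
+        # for ephemeral disks, the ephemeral disk number.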
+ m_vhd_expected = [mock.call(self.vmops._session, instance,
+ "parent", dest, sr_path, 1),
+ mock.call(self.vmops._session, instance,
+ "grandp", dest, sr_path, 2),
+ mock.call(self.vmops._session, instance,
+ "4-parent", dest, sr_path, 1, 1),
+ mock.call(self.vmops._session, instance,
+ "5-parent", dest, sr_path, 1, 2),
+ mock.call(self.vmops._session, instance,
+ "leaf", dest, sr_path, 0),
+ mock.call(self.vmops._session, instance,
+ "4-leaf", dest, sr_path, 0, 1),
+ mock.call(self.vmops._session, instance,
+ "5-leaf", dest, sr_path, 0, 2)]
+ self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+            # step 5/5 is executed by finish_migration.
+ ]
+ self.assertEqual(prog_expected, mock_update_progress.call_args_list)
+
+ @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
+ def test_migrate_disk_resizing_up_rollback(self,
+ mock_restore,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "fake"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_migrate_vhd.side_effect = test.TestingException
+ mock_restore.side_effect = test.TestingException
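+        # Even when the rollback itself fails, the original error must
+        # surface as InstanceFaultRollback rather than be masked.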
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.vmops._migrate_disk_resizing_up,
+ context, instance, dest, vm_ref, sr_path)
+
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_restore.assert_called_once_with(instance)
+ mock_migrate_vhd.assert_called_once_with(self.vmops._session,
+ instance, "parent", dest, sr_path, 1)
+
+
+class CreateVMRecordTestCase(VMOpsTestBase):
+ @mock.patch.object(vm_utils, 'determine_vm_mode')
+ @mock.patch.object(vm_utils, 'get_vm_device_id')
+ @mock.patch.object(vm_utils, 'create_vm')
+ def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
+ mock_get_vm_device_id, mock_determine_vm_mode):
+
+ context = "context"
+ instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
+ name_label = "dummy"
+ disk_image_type = "vhd"
+ kernel_file = "kernel"
+ ramdisk_file = "ram"
+ device_id = "0002"
+ image_properties = {"xenapi_device_id": device_id}
+ image_meta = {"properties": image_properties}
+ session = "session"
+ self.vmops._session = session
+ mock_get_vm_device_id.return_value = device_id
+ mock_determine_vm_mode.return_value = "vm_mode"
+
+ self.vmops._create_vm_record(context, instance, name_label,
+ disk_image_type, kernel_file, ramdisk_file, image_meta)
+
+ mock_get_vm_device_id.assert_called_with(session, image_properties)
+ mock_create_vm.assert_called_with(session, instance, name_label,
+ kernel_file, ramdisk_file, False, device_id)
+
+
+class BootableTestCase(VMOpsTestBase):
+
+ def setUp(self):
+ super(BootableTestCase, self).setUp()
+
+ self.instance = {"name": "test", "uuid": "fake"}
+ vm_rec, self.vm_ref = self.create_vm('test')
+
+        # Sanity check: the bootlock is initially disabled.
+ self.assertEqual({}, vm_rec['blocked_operations'])
+
+ def _get_blocked(self):
+ vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
+ return vm_rec['blocked_operations']
+
+ def test_acquire_bootlock(self):
+ self.vmops._acquire_bootlock(self.vm_ref)
+ blocked = self._get_blocked()
+ self.assertIn('start', blocked)
+
+ def test_release_bootlock(self):
+ self.vmops._acquire_bootlock(self.vm_ref)
+ self.vmops._release_bootlock(self.vm_ref)
+ blocked = self._get_blocked()
+ self.assertNotIn('start', blocked)
+
+ def test_set_bootable(self):
+ self.vmops.set_bootable(self.instance, True)
+ blocked = self._get_blocked()
+ self.assertNotIn('start', blocked)
+
+ def test_set_not_bootable(self):
+ self.vmops.set_bootable(self.instance, False)
+ blocked = self._get_blocked()
+ self.assertIn('start', blocked)
+
+
+@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
+class ResizeVdisTestCase(VMOpsTestBase):
+    def test_resize_root_volumes_osvol_false(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertTrue(mock_resize.called)
+
+ def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {'osvol': True}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertFalse(mock_resize.called)
+
+ def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
+ mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
+ ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
+ vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
+ 'ephemerals': ephemerals}
+ with mock.patch.object(vm_utils, 'generate_single_ephemeral',
+ autospec=True) as g:
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertEqual([mock.call(self.vmops._session, instance, 4,
+ 2000),
+ mock.call(self.vmops._session, instance, 5,
+ 1000)],
+ mock_resize.call_args_list)
+ self.assertFalse(g.called)
+
+ def test_resize_up_vdis_root(self, mock_resize):
+ instance = {"root_gb": 20, "ephemeral_gb": 0}
+ self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
+ mock_resize.assert_called_once_with(self.vmops._session, instance,
+ "vdi_ref", 20)
+
+ def test_resize_up_vdis_zero_disks(self, mock_resize):
+ instance = {"root_gb": 0, "ephemeral_gb": 0}
+ self.vmops._resize_up_vdis(instance, {"root": {}})
+ self.assertFalse(mock_resize.called)
+
+ def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
+ instance = {"root_gb": 0, "ephemeral_gb": 3000}
+ vdis = {}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = {"root_gb": 0, "ephemeral_gb": 3000}
+ ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
+ vdis = {"ephemerals": ephemerals}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ mock_sizes.assert_called_once_with(3000)
+ expected = [mock.call(self.vmops._session, instance, 4, 2000),
+ mock.call(self.vmops._session, instance, 5, 1000)]
+ self.assertEqual(expected, mock_resize.call_args_list)
+
+ @mock.patch.object(vm_utils, 'generate_single_ephemeral')
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
+ mock_generate,
+ mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
+ ephemerals = {"4": {"ref": 4}}
+ vdis = {"ephemerals": ephemerals}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ mock_sizes.assert_called_once_with(3000)
+ mock_resize.assert_called_once_with(self.vmops._session, instance,
+ 4, 2000)
+ mock_generate.assert_called_once_with(self.vmops._session, instance,
+ None, 5, 1000)
+
+
+@mock.patch.object(vm_utils, 'remove_old_snapshots')
+class CleanupFailedSnapshotTestCase(VMOpsTestBase):
+ def test_post_interrupted_snapshot_cleanup(self, mock_remove):
+ self.vmops._get_vm_opaque_ref = mock.Mock()
+ self.vmops._get_vm_opaque_ref.return_value = "vm_ref"
+
+ self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
+
+ mock_remove.assert_called_once_with(self.vmops._session,
+ "instance", "vm_ref")
+
+
+class LiveMigrateHelperTestCase(VMOpsTestBase):
+ def test_connect_block_device_volumes_none(self):
+ self.assertEqual({}, self.vmops.connect_block_device_volumes(None))
+
+ @mock.patch.object(volumeops.VolumeOps, "connect_volume")
+ def test_connect_block_device_volumes_calls_connect(self, mock_connect):
+ with mock.patch.object(self.vmops._session,
+ "call_xenapi") as mock_session:
+ mock_connect.return_value = ("sr_uuid", None)
+ mock_session.return_value = "sr_ref"
+ bdm = {"connection_info": "c_info"}
+ bdi = {"block_device_mapping": [bdm]}
+ result = self.vmops.connect_block_device_volumes(bdi)
+
+ self.assertEqual({'sr_uuid': 'sr_ref'}, result)
+
+ mock_connect.assert_called_once_with("c_info")
+ mock_session.assert_called_once_with("SR.get_by_uuid",
+ "sr_uuid")
+
+
+@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
+@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+@mock.patch.object(vm_utils, 'resize_disk')
+@mock.patch.object(vm_utils, 'migrate_vhd')
+@mock.patch.object(vm_utils, 'destroy_vdi')
+class MigrateDiskResizingDownTestCase(VMOpsTestBase):
+ def test_migrate_disk_resizing_down_works_no_ephemeral(
+ self,
+ mock_destroy_vdi,
+ mock_migrate_vhd,
+ mock_resize_disk,
+ mock_get_vdi_for_vm_safely,
+ mock_update_instance_progress,
+ mock_apply_orig_vm_name_label,
+ mock_resize_ensure_vm_is_shutdown):
+
+ context = "ctx"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+ instance_type = dict(root_gb=1)
+ old_vdi_ref = "old_ref"
+ new_vdi_ref = "new_ref"
+ new_vdi_uuid = "new_uuid"
+
+ mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
+ mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
+
+ self.vmops._migrate_disk_resizing_down(context, instance, dest,
+ instance_type, vm_ref, sr_path)
+
+ mock_get_vdi_for_vm_safely.assert_called_once_with(
+ self.vmops._session,
+ vm_ref)
+ mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
+ instance, vm_ref)
+ mock_apply_orig_vm_name_label.assert_called_once_with(
+ instance, vm_ref)
+ mock_resize_disk.assert_called_once_with(
+ self.vmops._session,
+ instance,
+ old_vdi_ref,
+ instance_type)
+ mock_migrate_vhd.assert_called_once_with(
+ self.vmops._session,
+ instance,
+ new_vdi_uuid,
+ dest,
+ sr_path, 0)
+ mock_destroy_vdi.assert_called_once_with(
+ self.vmops._session,
+ new_vdi_ref)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+            # step 5/5 is executed by finish_migration.
+ ]
+ self.assertEqual(prog_expected,
+ mock_update_instance_progress.call_args_list)
+
+
+class GetVdisForInstanceTestCase(VMOpsTestBase):
+ """Tests get_vdis_for_instance utility method."""
+ def setUp(self):
+ super(GetVdisForInstanceTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.context.auth_token = 'auth_token'
+ self.session = mock.Mock()
+ self.vmops._session = self.session
+ self.instance = fake_instance.fake_instance_obj(self.context)
+ self.name_label = 'name'
+ self.image = 'fake_image_id'
+
+ @mock.patch.object(volumeops.VolumeOps, "connect_volume",
+ return_value=("sr", "vdi_uuid"))
+ def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
+ # setup fake data
+ data = {'name_label': self.name_label,
+ 'sr_uuid': 'fake',
+ 'auth_password': 'scrubme'}
+ bdm = [{'mount_device': '/dev/vda',
+ 'connection_info': {'data': data}}]
+ bdi = {'root_device_name': 'vda',
+ 'block_device_mapping': bdm}
+
+        # Tests that the block_device_info parameters are sanitized for
+        # passwords when logged.
+ def fake_debug(*args, **kwargs):
+ if 'auth_password' in args[0]:
+ self.assertNotIn('scrubme', args[0])
+ fake_debug.matched = True
+
+ fake_debug.matched = False
+
+ with mock.patch.object(vmops.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ vdis = self.vmops._get_vdis_for_instance(self.context,
+ self.instance, self.name_label, self.image,
+ image_type=4, block_device_info=bdi)
+ self.assertEqual(1, len(vdis))
+ get_uuid_mock.assert_called_once_with({"data": data})
+            # We don't care what the log message is; we just want to make
+            # sure our stub method is called, which asserts the password
+            # is scrubbed.
+ self.assertTrue(debug_mock.called)
+ self.assertTrue(fake_debug.matched)
diff --git a/nova/tests/unit/virt/xenapi/test_volume_utils.py b/nova/tests/unit/virt/xenapi/test_volume_utils.py
new file mode 100644
index 0000000000..59fd4626b9
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volume_utils.py
@@ -0,0 +1,232 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import volume_utils
+
+
+class SROps(stubs.XenAPITestBaseNoDB):
+ def test_find_sr_valid_uuid(self):
+ self.session = mock.Mock()
+ self.session.call_xenapi.return_value = 'sr_ref'
+ self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
+ 'sr_uuid'),
+ 'sr_ref')
+
+ def test_find_sr_invalid_uuid(self):
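+        # XenAPI failures carry a details list whose first element is
+        # the error code; UUID_INVALID means no SR with that uuid
+        # exists, so find_sr_by_uuid should return None, not raise.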
+ class UUIDException(Exception):
+ details = ["UUID_INVALID", "", "", ""]
+
+ self.session = mock.Mock()
+ self.session.XenAPI.Failure = UUIDException
+ self.session.call_xenapi.side_effect = UUIDException
+ self.assertIsNone(
+ volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'))
+
+ def test_find_sr_from_vdi(self):
+ vdi_ref = 'fake-ref'
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
+ vdi_ref)
+
+ def test_find_sr_from_vdi_exception(self):
+ vdi_ref = 'fake-ref'
+
+ class FakeException(Exception):
+ pass
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+ self.assertRaises(exception.StorageError,
+ volume_utils.find_sr_from_vdi, session, vdi_ref)
+
+
+class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
+ def test_target_host(self):
+ self.assertEqual(volume_utils._get_target_host('host:port'),
+ 'host')
+
+ self.assertEqual(volume_utils._get_target_host('host'),
+ 'host')
+
+ # There is no default value
+ self.assertIsNone(volume_utils._get_target_host(':port'))
+
+ self.assertIsNone(volume_utils._get_target_host(None))
+
+ def test_target_port(self):
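+        # 3260 is the IANA-assigned default iSCSI port, used whenever
+        # the portal address does not specify one.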
+ self.assertEqual(volume_utils._get_target_port('host:port'),
+ 'port')
+
+ self.assertEqual(volume_utils._get_target_port('host'),
+ '3260')
+
+
+class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
+
+ @mock.patch.object(volume_utils, '_get_vdi_ref')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
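+        # Simulate the VDI appearing only on the second lookup:
+        # introduce_vdi should rescan, sleep and retry before returning.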
+ def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
+ fake_get_vdi_ref.call_count += 1
+ if fake_get_vdi_ref.call_count == 2:
+ return 'vdi_ref'
+
+ def fake_call_xenapi(method, *args):
+ if method == 'SR.scan':
+ return
+ elif method == 'VDI.get_record':
+ return {'managed': 'true'}
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+
+ mock_get_vdi_ref.side_effect = fake_get_vdi_ref
+ fake_get_vdi_ref.call_count = 0
+
+ self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
+ 'vdi_ref')
+ mock_sleep.assert_called_once_with(20)
+
+ @mock.patch.object(volume_utils, '_get_vdi_ref')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
+ def fake_call_xenapi(method, *args):
+ if method == 'SR.scan':
+ return
+ elif method == 'VDI.get_record':
+ return {'managed': 'true'}
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ mock_get_vdi_ref.return_value = None
+
+ self.assertRaises(exception.StorageError,
+ volume_utils.introduce_vdi, session, 'sr_ref')
+ mock_sleep.assert_called_once_with(20)
+
+
+class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
+ def test_mountpoint_to_number(self):
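+        # Device names map to XenAPI userdevice numbers: the final
+        # letter is the index ('a' -> 0 ... 'p' -> 15), plain digits
+        # parse as-is, and anything past 'p' yields -1.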
+ cases = {
+ 'sda': 0,
+ 'sdp': 15,
+ 'hda': 0,
+ 'hdp': 15,
+ 'vda': 0,
+ 'xvda': 0,
+ '0': 0,
+ '10': 10,
+ 'vdq': -1,
+ 'sdq': -1,
+ 'hdq': -1,
+ 'xvdq': -1,
+ }
+
+ for (input, expected) in cases.iteritems():
+ actual = volume_utils._mountpoint_to_number(input)
+ self.assertEqual(actual, expected,
+ '%s yielded %s, not %s' % (input, actual, expected))
+
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_parse_volume_info_parsing_auth_details(self):
+ conn_info = self._make_connection_info()
+ result = volume_utils._parse_volume_info(conn_info['data'])
+
+ self.assertEqual('username', result['chapuser'])
+ self.assertEqual('password', result['chappassword'])
+
+ def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
+ self.assertRaises(
+ exception.StorageError,
+ volume_utils.get_device_number,
+ 'dev/sd')
+
+
+class FindVBDTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_vbd_by_number_works(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "1"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertEqual("a", result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
+
+ def test_find_vbd_by_number_no_matches(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "3"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ expected = [mock.call("a"), mock.call("b")]
+ self.assertEqual(expected,
+ session.VBD.get_userdevice.call_args_list)
+
+ def test_find_vbd_by_number_no_vbds(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = []
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ self.assertFalse(session.VBD.get_userdevice.called)
+
+ def test_find_vbd_by_number_ignores_exception(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.VM.get_VBDs.return_value = ["a"]
+ session.VBD.get_userdevice.side_effect = test.TestingException
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
diff --git a/nova/tests/unit/virt/xenapi/test_volumeops.py b/nova/tests/unit/virt/xenapi/test_volumeops.py
new file mode 100644
index 0000000000..0e840bb209
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volumeops.py
@@ -0,0 +1,549 @@
+# Copyright (c) 2012 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
+class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VolumeOpsTestBase, self).setUp()
+ self._setup_mock_volumeops()
+
+ def _setup_mock_volumeops(self):
+ self.session = stubs.FakeSessionForVolumeTests('fake_uri')
+ self.ops = volumeops.VolumeOps(self.session)
+
+
+class VolumeDetachTestCase(VolumeOpsTestBase):
+ def test_detach_volume_call(self):
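+        # Track call order: the SR must be looked up from the VBD
+        # before the VBD is destroyed, or the SR reference is lost.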
+ registered_calls = []
+
+ def regcall(label):
+ def side_effect(*args, **kwargs):
+ registered_calls.append(label)
+ return side_effect
+
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+ volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ volumeops.volume_utils.find_vbd_by_number(
+ 'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+ volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
+ False)
+
+ volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
+
+ volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
+ regcall('destroy_vbd'))
+
+ volumeops.volume_utils.find_sr_from_vbd(
+ 'session', 'vbdref').WithSideEffects(
+ regcall('find_sr_from_vbd')).AndReturn('srref')
+
+ volumeops.volume_utils.purge_sr('session', 'srref')
+
+ self.mox.ReplayAll()
+
+ ops.detach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
+ self.assertEqual(
+ ['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.return_value = "vbd_ref"
+
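+        # /dev/xvdd corresponds to userdevice number 3.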
+ self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+ mock_vm.assert_called_once_with(self.session, "name")
+ mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
+ mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+    def test_detach_volume_skips_detach_when_vbd_missing(self, mock_vm,
+                                                         mock_vbd,
+                                                         mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.return_value = None
+
+ self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume_raises(self, mock_vm, mock_vbd,
+ mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ self.ops.detach_volume, {}, "name", "/dev/xvdd")
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volume_utils, "purge_sr")
+ @mock.patch.object(vm_utils, "destroy_vbd")
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(vm_utils, "unplug_vbd")
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
+ mock_find_sr, mock_destroy, mock_purge):
+ mock_shutdown.return_value = False
+ mock_find_sr.return_value = "sr_ref"
+
+ self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
+
+ mock_shutdown.assert_called_once_with(self.session, "vm_ref")
+ mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
+ mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
+ mock_destroy.assert_called_once_with(self.session, "vbd_ref")
+ mock_purge.assert_called_once_with(self.session, "sr_ref")
+
+ @mock.patch.object(volume_utils, "purge_sr")
+ @mock.patch.object(vm_utils, "destroy_vbd")
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(vm_utils, "unplug_vbd")
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
+ mock_find_sr, mock_destroy, mock_purge):
+ mock_shutdown.return_value = True
+ mock_find_sr.return_value = "sr_ref"
+
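+        # A halted VM has no plugged VBDs, so unplug must be skipped
+        # and the VBDs destroyed directly.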
+ self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
+
+ expected = [mock.call(self.session, "vbd_ref_1"),
+ mock.call(self.session, "vbd_ref_2")]
+ self.assertEqual(expected, mock_destroy.call_args_list)
+ mock_purge.assert_called_with(self.session, "sr_ref")
+ self.assertFalse(mock_unplug.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
+ mock_get_all.return_value = []
+
+ self.ops.detach_all("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_detach_all_volumes(self, mock_get_all, mock_detach):
+ mock_get_all.return_value = ["1"]
+
+ self.ops.detach_all("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ mock_detach.assert_called_once_with("vm_ref", ["1"])
+
+ def test_get_all_volume_vbd_refs_no_vbds(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = []
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual([], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_conf.called)
+
+ def test_get_all_volume_vbd_refs_no_volumes(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = ["1"]
+ mock_conf.return_value = {}
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual([], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+ mock_conf.assert_called_once_with("1")
+
+ def test_get_all_volume_vbd_refs_with_volumes(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = ["1", "2"]
+ mock_conf.return_value = {"osvol": True}
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual(["1", "2"], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+
+
+class AttachVolumeTestCase(VolumeOpsTestBase):
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
+ mock_get_vm.return_value = "vm_ref"
+
+ self.ops.attach_volume({}, "instance_name", "/dev/xvda")
+
+ mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+ True)
+
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
+ mock_get_vm.return_value = "vm_ref"
+
+ self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
+
+ mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+ False)
+
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+    def test_connect_volume_calls_attach_volume(self, mock_attach):
+ self.ops.connect_volume({})
+ mock_attach.assert_called_once_with({})
+
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver):
+ connection_info = {"data": {}}
+ with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_vdi.return_value = "vdi_uuid"
+
+ result = self.ops._attach_volume(connection_info)
+
+ self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, None)
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ self.assertFalse(mock_attach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver):
+ connection_info = {"data": {}}
+ with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_hypervisor.return_value = "vdi_ref"
+ mock_vdi.return_value = "vdi_uuid"
+
+ result = self.ops._attach_volume(connection_info, "vm_ref",
+ "name", 2, True)
+
+ self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, "name")
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
+ True)
+
+ @mock.patch.object(volume_utils, "forget_sr")
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver, mock_forget):
+ connection_info = {"data": {}}
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_hypervisor.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ self.ops._attach_volume, connection_info)
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, None)
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ mock_forget.assert_called_once_with(self.session, "sr_ref")
+ self.assertFalse(mock_attach.called)
+
+ def test_check_is_supported_driver_type_pass_iscsi(self):
+ conn_info = {"driver_volume_type": "iscsi"}
+ self.ops._check_is_supported_driver_type(conn_info)
+
+ def test_check_is_supported_driver_type_pass_xensm(self):
+ conn_info = {"driver_volume_type": "xensm"}
+ self.ops._check_is_supported_driver_type(conn_info)
+
+    def test_check_is_supported_driver_type_bad_raises(self):
+ conn_info = {"driver_volume_type": "bad"}
+ self.assertRaises(exception.VolumeDriverNotFound,
+ self.ops._check_is_supported_driver_type, conn_info)
+
+ @mock.patch.object(volume_utils, "introduce_sr")
+ @mock.patch.object(volume_utils, "find_sr_by_uuid")
+ @mock.patch.object(volume_utils, "parse_sr_info")
+ def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
+ mock_introduce_sr):
+ mock_parse.return_value = ("uuid", "label", "params")
+ mock_find_sr.return_value = None
+ mock_introduce_sr.return_value = "sr_ref"
+
+ ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+ self.assertEqual("sr_ref", ref)
+ self.assertEqual("uuid", uuid)
+ mock_parse.assert_called_once_with({}, "Disk-for:name")
+ mock_find_sr.assert_called_once_with(self.session, "uuid")
+ mock_introduce_sr.assert_called_once_with(self.session, "uuid",
+ "label", "params")
+
+ @mock.patch.object(volume_utils, "introduce_sr")
+ @mock.patch.object(volume_utils, "find_sr_by_uuid")
+ @mock.patch.object(volume_utils, "parse_sr_info")
+ def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
+ mock_introduce_sr):
+ mock_parse.return_value = ("uuid", "label", "params")
+ mock_find_sr.return_value = "sr_ref"
+
+ ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+ self.assertEqual("sr_ref", ref)
+ self.assertEqual("uuid", uuid)
+ mock_parse.assert_called_once_with({}, "Disk-for:name")
+ mock_find_sr.assert_called_once_with(self.session, "uuid")
+ self.assertFalse(mock_introduce_sr.called)
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_regular(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ result = self.ops._connect_hypervisor_to_volume("sr", {})
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr")
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ conn = {"vdi_uuid": "id"}
+ result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr",
+ vdi_uuid="id")
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_lun(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ conn = {"target_lun": "lun"}
+ result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr",
+ target_lun="lun")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+ mock_shutdown.return_value = False
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+ mock_plug.assert_called_once_with("vbd", "vm")
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ mock_shutdown.assert_called_once_with(self.session, "vm")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+ mock_shutdown.return_value = True
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+ self.assertFalse(mock_plug.called)
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ mock_shutdown.assert_called_once_with(self.session, "vm")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
+ self.assertFalse(mock_plug.called)
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ self.assertFalse(mock_shutdown.called)
+
+
+class FindBadVolumeTestCase(VolumeOpsTestBase):
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_no_vbds(self, mock_get_all):
+ mock_get_all.return_value = []
+
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ self.assertEqual([], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["1", "2"]
+ mock_find_sr.return_value = "sr_ref"
+
+ with mock.patch.object(self.session.SR, "scan") as mock_scan:
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ expected_find = [mock.call(self.session, "1"),
+ mock.call(self.session, "2")]
+ self.assertEqual(expected_find, mock_find_sr.call_args_list)
+ expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
+ self.assertEqual(expected_scan, mock_scan.call_args_list)
+ self.assertEqual([], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["vbd_ref"]
+ mock_find_sr.return_value = "sr_ref"
+
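+        # SR_BACKEND_FAILURE_40 is the scan failure code that marks a
+        # volume as bad; any other code is re-raised (see the _raises
+        # test below).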
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ self.ops._session = session
+
+ with mock.patch.object(session.SR, "scan") as mock_scan:
+ with mock.patch.object(session.VBD,
+ "get_device") as mock_get:
+ mock_scan.side_effect = FakeException
+ mock_get.return_value = "xvdb"
+
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+ mock_get.assert_called_once_with("vbd_ref")
+ self.assertEqual(["/dev/xvdb"], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["vbd_ref"]
+ mock_find_sr.return_value = "sr_ref"
+
+ class FakeException(Exception):
+ details = ['foo', "", "", ""]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ self.ops._session = session
+
+ with mock.patch.object(session.SR, "scan") as mock_scan:
+ with mock.patch.object(session.VBD,
+ "get_device") as mock_get:
+ mock_scan.side_effect = FakeException
+ mock_get.return_value = "xvdb"
+
+ self.assertRaises(FakeException,
+ self.ops.find_bad_volumes, "vm_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+
+
+class CleanupFromVDIsTestCase(VolumeOpsTestBase):
+ def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs):
+ find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
+ in vdi_refs]
+ find_sr_from_vdi.assert_has_calls(find_sr_calls)
+ purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
+ in sr_refs]
+ purge_sr.assert_has_calls(purge_sr_calls)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref2']
+ find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
+ sr_refs[0]]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+        purge_sr.side_effect = [test.TestingException, None]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py
new file mode 100644
index 0000000000..c90f8c2f63
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_xenapi.py
@@ -0,0 +1,4105 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for XenAPI."""
+
+import ast
+import base64
+import contextlib
+import copy
+import functools
+import os
+import re
+
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import flavors
+from nova.compute import hvtype
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_processutils
+import nova.tests.unit.image.fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_aggregate
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import host
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import pool
+from nova.virt.xenapi import pool_states
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('network_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
+CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
+ group="xenserver")
+
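+
+# Fake Glance image ids used throughout these tests; IMAGE_FIXTURES below
+# maps each id to the image_meta the fake image service returns for it.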
+IMAGE_MACHINE = '1'
+IMAGE_KERNEL = '2'
+IMAGE_RAMDISK = '3'
+IMAGE_RAW = '4'
+IMAGE_VHD = '5'
+IMAGE_ISO = '6'
+IMAGE_IPXE_ISO = '7'
+IMAGE_FROM_VOLUME = '8'
+
+IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'disk_format': 'ami',
+ 'container_format': 'ami'},
+ },
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'disk_format': 'aki',
+ 'container_format': 'aki'},
+ },
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'disk_format': 'ari',
+ 'container_format': 'ari'},
+ },
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'disk_format': 'raw',
+ 'container_format': 'bare'},
+ },
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'disk_format': 'vhd',
+ 'container_format': 'ovf'},
+ },
+ IMAGE_ISO: {
+ 'image_meta': {'name': 'fakeiso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare'},
+ },
+ IMAGE_IPXE_ISO: {
+ 'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare',
+ 'properties': {'ipxe_boot': 'true'}},
+ },
+ IMAGE_FROM_VOLUME: {
+ 'image_meta': {'name': 'fake_ipxe_iso',
+ 'properties': {'foo': 'bar'}},
+ },
+}
+
+
+def get_session():
+ return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
+
+
+def set_image_fixtures():
+ image_service = fake_image.FakeImageService()
+ image_service.images.clear()
+ for image_id, image_meta in IMAGE_FIXTURES.items():
+ image_meta = image_meta['image_meta']
+ image_meta['id'] = image_id
+ image_service.create(None, image_meta)
+
+
+def get_fake_device_info():
+    # FIXME: 'sr_uuid', 'introduce_sr_keys', 'sr_type' and 'vdi_uuid'
+    # can be removed from the dict when LP bug #1087308 is fixed
+ fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
+ fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
+ fake = {'block_device_mapping':
+ [{'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'sr_uuid': 'falseSR',
+ 'introduce_sr_keys': ['sr_type'],
+ 'sr_type': 'iscsi',
+ 'vdi_uuid': fake_vdi_uuid,
+ 'target_discovered': False,
+ 'target_iqn': 'foo_iqn:foo_volid',
+ 'target_portal': 'localhost:3260',
+ 'volume_id': 'foo_volid',
+ 'target_lun': 1,
+ 'auth_password': 'my-p@55w0rd',
+ 'auth_username': 'johndoe',
+ 'auth_method': u'CHAP'}, },
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}, ],
+ 'root_device_name': '/dev/sda',
+ 'ephemerals': [],
+ 'swap': None, }
+ return fake
+
+
+def stub_vm_utils_with_vdi_attached_here(function):
+ """vm_utils.with_vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a vdi. This provides a
+ decorator to handle that.
+ """
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ @contextlib.contextmanager
+ def fake_vdi_attached_here(*args, **kwargs):
+ fake_dev = 'fakedev'
+ yield fake_dev
+
+ def fake_image_download(*args, **kwargs):
+ pass
+
+ orig_vdi_attached_here = vm_utils.vdi_attached_here
+ orig_image_download = fake_image._FakeImageService.download
+ try:
+ vm_utils.vdi_attached_here = fake_vdi_attached_here
+ fake_image._FakeImageService.download = fake_image_download
+ return function(self, *args, **kwargs)
+ finally:
+ fake_image._FakeImageService.download = orig_image_download
+ vm_utils.vdi_attached_here = orig_vdi_attached_here
+
+ return decorated_function
+
+
+def get_create_system_metadata(context, instance_type_id):
+ flavor = db.flavor_get(context, instance_type_id)
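+    # save_flavor_info() serialises the flavor's fields into
+    # instance_type_* keys, so the instance carries its flavor data
+    # around in system_metadata.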
+ return flavors.save_flavor_info({}, flavor)
+
+
+def create_instance_with_system_metadata(context, instance_values):
+ instance_values['system_metadata'] = get_create_system_metadata(
+ context, instance_values['instance_type_id'])
+ instance_values['pci_devices'] = []
+ return db.instance_create(context, instance_values)
+
+
+class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for Volume operations."""
+ def setUp(self):
+ super(XenAPIVolumeTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.instance = fake_instance.fake_db_instance(name='foo')
+
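+    # A minimal iSCSI connection_info dict, shaped like the one the tests
+    # below pass to attach_volume.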
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_attach_volume(self):
+ # This shows how to test Ops classes' methods.
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
+ conn_info = self._make_connection_info()
+ self.assertIsNone(
+ conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
+
+ # check that the VM has a VBD attached to it
+ # Get XenAPI record for VBD
+ vbds = xenapi_fake.get_all('VBD')
+ vbd = xenapi_fake.get_record('VBD', vbds[0])
+ vm_ref = vbd['VM']
+ self.assertEqual(vm_ref, vm)
+
+ def test_attach_volume_raise_exception(self):
+ # This shows how to test when exceptions are raised.
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForVolumeFailedTests)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(self.instance['name'], 'Running')
+ self.assertRaises(exception.VolumeDriverNotFound,
+ conn.attach_volume,
+ None, {'driver_volume_type': 'nonexist'},
+ self.instance, '/dev/sdc')
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIVMTestCase(stubs.XenAPITestBase):
+ """Unit tests for VM operations."""
+ def setUp(self):
+ super(XenAPIVMTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ self.network = importutils.import_object(CONF.network_manager)
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ xenapi_fake.create_network('fake', 'fake_br1')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+ stubs.stub_out_vm_methods(self.stubs)
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.conn._session.is_local_connection = False
+
+ fake_image.stub_out_image_service(self.stubs)
+ set_image_fixtures()
+ stubs.stubout_image_service_download(self.stubs)
+ stubs.stubout_stream_disk(self.stubs)
+
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
+ name_label = "fakenamelabel"
+ disk_type = "fakedisktype"
+ virtual_size = 777
+ return vm_utils.create_vdi(
+ session, sr_ref, instance, name_label, disk_type,
+ virtual_size)
+ self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
+
+ def tearDown(self):
+ fake_image.FakeImageService_reset()
+ super(XenAPIVMTestCase, self).tearDown()
+
+ def test_init_host(self):
+ session = get_session()
+ vm = vm_utils._get_this_vm_ref(session)
+ # Local root disk
+ vdi0 = xenapi_fake.create_vdi('compute', None)
+ vbd0 = xenapi_fake.create_vbd(vm, vdi0)
+ # Instance VDI
+ vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
+ other_config={'nova_instance_uuid': 'aaaa'})
+ xenapi_fake.create_vbd(vm, vdi1)
+        # Only looks like an instance VDI
+ vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
+ vbd2 = xenapi_fake.create_vbd(vm, vdi2)
+
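+        # init_host should detach only vdi1 (tagged with nova_instance_uuid
+        # in other_config); the root disk and the look-alike keep their VBDs.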
+ self.conn.init_host(None)
+ self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
+
+ def test_instance_exists(self):
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.stubs.Set(objects.Instance, 'name', 'foo')
+ instance = objects.Instance(uuid='fake-uuid')
+ self.assertTrue(self.conn.instance_exists(instance))
+
+ def test_instance_not_exists(self):
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
+ self.mox.ReplayAll()
+
+ self.stubs.Set(objects.Instance, 'name', 'bar')
+ instance = objects.Instance(uuid='fake-uuid')
+ self.assertFalse(self.conn.instance_exists(instance))
+
+ def test_list_instances_0(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(instances, [])
+
+ def test_list_instance_uuids_0(self):
+ instance_uuids = self.conn.list_instance_uuids()
+ self.assertEqual(instance_uuids, [])
+
+ def test_list_instance_uuids(self):
+ uuids = []
+ for x in xrange(1, 4):
+ instance = self._create_instance(x)
+ uuids.append(instance['uuid'])
+ instance_uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), len(instance_uuids))
+ self.assertEqual(set(uuids), set(instance_uuids))
+
+ def test_get_rrd_server(self):
+ self.flags(connection_url='myscheme://myaddress/',
+ group='xenserver')
+ server_info = vm_utils._get_rrd_server()
+ self.assertEqual(server_info[0], 'myscheme')
+ self.assertEqual(server_info[1], 'myaddress')
+
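+    # The values below correspond to the vm_rrd.xml fixture that
+    # test_get_diagnostics feeds through vm_utils._get_rrd.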
+ expected_raw_diagnostics = {
+ 'vbd_xvdb_write': '0.0',
+ 'memory_target': '4294967296.0000',
+ 'memory_internal_free': '1415564.0000',
+ 'memory': '4294967296.0000',
+ 'vbd_xvda_write': '0.0',
+ 'cpu0': '0.0042',
+ 'vif_0_tx': '287.4134',
+ 'vbd_xvda_read': '0.0',
+ 'vif_0_rx': '1816.0144',
+ 'vif_2_rx': '0.0',
+ 'vif_2_tx': '0.0',
+ 'vbd_xvdb_read': '0.0',
+ 'last_update': '1328795567',
+ }
+
+ def test_get_diagnostics(self):
+ def fake_get_rrd(host, vm_uuid):
+ path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(path, 'vm_rrd.xml')) as f:
+ return re.sub(r'\s', '', f.read())
+ self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+ expected = self.expected_raw_diagnostics
+ instance = self._create_instance()
+ actual = self.conn.get_diagnostics(instance)
+ self.assertThat(actual, matchers.DictMatches(expected))
+
+ def test_get_instance_diagnostics(self):
+ def fake_get_rrd(host, vm_uuid):
+ path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(path, 'vm_rrd.xml')) as f:
+ return re.sub(r'\s', '', f.read())
+ self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+ expected = {
+ 'config_drive': False,
+ 'state': 'running',
+ 'driver': 'xenapi',
+ 'version': '1.0',
+ 'uptime': 0,
+ 'hypervisor_os': None,
+ 'cpu_details': [{'time': 0}, {'time': 0},
+ {'time': 0}, {'time': 0}],
+ 'nic_details': [{'mac_address': '00:00:00:00:00:00',
+ 'rx_drop': 0,
+ 'rx_errors': 0,
+ 'rx_octets': 0,
+ 'rx_packets': 0,
+ 'tx_drop': 0,
+ 'tx_errors': 0,
+ 'tx_octets': 0,
+ 'tx_packets': 0}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 0,
+ 'read_requests': 0,
+ 'write_bytes': 0,
+ 'write_requests': 0}],
+ 'memory_details': {'maximum': 8192, 'used': 0}}
+
+ instance = self._create_instance()
+ actual = self.conn.get_instance_diagnostics(instance)
+ self.assertEqual(expected, actual.serialize())
+
+ def test_get_vnc_console(self):
+ instance = self._create_instance(obj=True)
+ session = get_session()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vm_ref = vm_utils.lookup(session, instance['name'])
+
+ console = conn.get_vnc_console(self.context, instance)
+
+        # NOTE(sulo): we don't care about the session id in the test;
+        # it will always differ, so strip it out
+ actual_path = console.internal_access_path.split('&')[0]
+ expected_path = "/console?ref=%s" % str(vm_ref)
+
+ self.assertEqual(expected_path, actual_path)
+
+ def test_get_vnc_console_for_rescue(self):
+ instance = self._create_instance(obj=True)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
+ 'Running')
+ # Set instance state to rescued
+ instance['vm_state'] = 'rescued'
+
+ console = conn.get_vnc_console(self.context, instance)
+
+        # NOTE(sulo): we don't care about the session id in the test;
+        # it will always differ, so strip it out
+ actual_path = console.internal_access_path.split('&')[0]
+ expected_path = "/console?ref=%s" % str(rescue_vm)
+
+ self.assertEqual(expected_path, actual_path)
+
+ def test_get_vnc_console_instance_not_ready(self):
+ instance = self._create_instance(obj=True, spawn=False)
+ instance.vm_state = 'building'
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.InstanceNotFound,
+ conn.get_vnc_console, self.context, instance)
+
+ def test_get_vnc_console_rescue_not_ready(self):
+ instance = self._create_instance(obj=True, spawn=False)
+ instance.vm_state = 'rescued'
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.InstanceNotReady,
+ conn.get_vnc_console, self.context, instance)
+
+ def test_instance_snapshot_fails_with_no_primary_vdi(self):
+
+ def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
+ vbd_type='disk', read_only=False, bootable=False,
+ osvol=False):
+ vbd_rec = {'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'userdevice': 'fake',
+ 'currently_attached': False}
+ vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
+ xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
+ return vbd_ref
+
+ self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
+ stubs.stubout_instance_snapshot(self.stubs)
+        # Stub out the firewall driver, since the previous stubs alter
+        # XML-RPC result parsing
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
+ instance = self._create_instance()
+
+ image_id = "my_snapshot_id"
+ self.assertRaises(exception.NovaException, self.conn.snapshot,
+ self.context, instance, image_id,
+ lambda *args, **kwargs: None)
+
+ def test_instance_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+ image_id = "my_snapshot_id"
+
+ stubs.stubout_instance_snapshot(self.stubs)
+ stubs.stubout_is_snapshot(self.stubs)
+        # Stub out the firewall driver, since the previous stubs alter
+        # XML-RPC result parsing
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
+
+ instance = self._create_instance()
+
+ self.fake_upload_called = False
+
+ def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
+ self.fake_upload_called = True
+ self.assertEqual(ctx, self.context)
+ self.assertEqual(inst, instance)
+ self.assertIsInstance(vdi_uuids, list)
+ self.assertEqual(img_id, image_id)
+
+ self.stubs.Set(glance.GlanceStore, 'upload_image',
+ fake_image_upload)
+
+ self.conn.snapshot(self.context, instance, image_id,
+ func_call_matcher.call)
+
+ # Ensure VM was torn down
+ vm_labels = []
+ for vm_ref in xenapi_fake.get_all('VM'):
+ vm_rec = xenapi_fake.get_record('VM', vm_ref)
+ if not vm_rec["is_control_domain"]:
+ vm_labels.append(vm_rec["name_label"])
+
+ self.assertEqual(vm_labels, [instance['name']])
+
+ # Ensure VBDs were torn down
+ vbd_labels = []
+ for vbd_ref in xenapi_fake.get_all('VBD'):
+ vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
+ vbd_labels.append(vbd_rec["vm_name_label"])
+
+ self.assertEqual(vbd_labels, [instance['name']])
+
+ # Ensure task states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
+ # Ensure VDIs were torn down
+ for vdi_ref in xenapi_fake.get_all('VDI'):
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ name_label = vdi_rec["name_label"]
+ self.assertFalse(name_label.endswith('snapshot'))
+
+ self.assertTrue(self.fake_upload_called)
+
+ def create_vm_record(self, conn, os_type, name):
+ instances = conn.list_instances()
+ self.assertEqual(instances, [name])
+
+ # Get Nova record for VM
+ vm_info = conn.get_info({'name': name})
+ # Get XenAPI record for VM
+ vms = [rec for ref, rec
+ in xenapi_fake.get_all_records('VM').iteritems()
+ if not rec['is_control_domain']]
+ vm = vms[0]
+ self.vm_info = vm_info
+ self.vm = vm
+
+ def check_vm_record(self, conn, instance_type_id, check_injection):
+ flavor = db.flavor_get(conn, instance_type_id)
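+        # MiB << 10 gives KiB, and KiB << 10 gives bytes; XenAPI stores the
+        # memory_* fields as byte-count strings, hence the str() here.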
+ mem_kib = long(flavor['memory_mb']) << 10
+ mem_bytes = str(mem_kib << 10)
+ vcpus = flavor['vcpus']
+ vcpu_weight = flavor['vcpu_weight']
+
+ self.assertEqual(self.vm_info['max_mem'], mem_kib)
+ self.assertEqual(self.vm_info['mem'], mem_kib)
+ self.assertEqual(self.vm['memory_static_max'], mem_bytes)
+ self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
+ self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
+ self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
+ self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
+ if vcpu_weight is None:
+ self.assertEqual(self.vm['VCPUs_params'], {})
+ else:
+ self.assertEqual(self.vm['VCPUs_params'],
+ {'weight': str(vcpu_weight), 'cap': '0'})
+
+ # Check that the VM is running according to Nova
+ self.assertEqual(self.vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to XenAPI.
+ self.assertEqual(self.vm['power_state'], 'Running')
+
+ if check_injection:
+ xenstore_data = self.vm['xenstore_data']
+ self.assertNotIn('vm-data/hostname', xenstore_data)
+ key = 'vm-data/networking/DEADBEEF0001'
+ xenstore_value = xenstore_data[key]
+ tcpip_data = ast.literal_eval(xenstore_value)
+ self.assertEqual(tcpip_data,
+ {'broadcast': '192.168.1.255',
+ 'dns': ['192.168.1.4', '192.168.1.3'],
+ 'gateway': '192.168.1.1',
+ 'gateway_v6': '2001:db8:0:1::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': '2001:db8:0:1:dcad:beff:feef:1',
+ 'netmask': 64,
+ 'gateway': '2001:db8:0:1::1'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.1.100',
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.1'},
+ {'enabled': '1',
+ 'ip': '192.168.1.101',
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.1'}],
+ 'label': 'test1',
+ 'mac': 'DE:AD:BE:EF:00:01'})
+
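+    # Windows guests are expected to boot HVM ('BIOS order', boot order
+    # 'dc'), while Linux guests boot PV via pygrub; each checker also
+    # asserts that the other mode's fields are left unset.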
+ def check_vm_params_for_windows(self):
+ self.assertEqual(self.vm['platform']['nx'], 'true')
+ self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEqual(self.vm['PV_args'], '')
+ self.assertEqual(self.vm['PV_bootloader'], '')
+ self.assertEqual(self.vm['PV_kernel'], '')
+ self.assertEqual(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEqual(self.vm['platform']['nx'], 'false')
+ self.assertEqual(self.vm['PV_args'], '')
+ self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEqual(self.vm['PV_kernel'], '')
+ self.assertEqual(self.vm['PV_ramdisk'], '')
+ self.assertEqual(self.vm['HVM_boot_params'], {})
+ self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEqual(self.vm['platform']['nx'], 'false')
+ self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEqual(self.vm['PV_kernel'], '')
+ self.assertNotEqual(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEqual(self.vm['HVM_boot_params'], {})
+ self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+ def _list_vdis(self):
+ session = get_session()
+ return session.call_xenapi('VDI.get_all')
+
+ def _list_vms(self):
+ session = get_session()
+ return session.call_xenapi('VM.get_all')
+
+ def _check_vdis(self, start_list, end_list):
+ for vdi_ref in end_list:
+ if vdi_ref not in start_list:
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ # If the cache is turned on then the base disk will be
+ # there even after the cleanup
+ if 'other_config' in vdi_rec:
+ if 'image-id' not in vdi_rec['other_config']:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+ else:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+
+ def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
+ instance_type_id="3", os_type="linux",
+ hostname="test", architecture="x86-64", instance_id=1,
+ injected_files=None, check_injection=False,
+ create_record=True, empty_dns=False,
+ block_device_info=None,
+ key_data=None):
+ if injected_files is None:
+ injected_files = []
+
+ # Fake out inject_instance_metadata
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ if create_record:
+ instance = objects.Instance(context=self.context)
+ instance.project_id = self.project_id
+ instance.user_id = self.user_id
+ instance.image_ref = image_ref
+ instance.kernel_id = kernel_id
+ instance.ramdisk_id = ramdisk_id
+ instance.root_gb = 20
+ instance.ephemeral_gb = 0
+ instance.instance_type_id = instance_type_id
+ instance.os_type = os_type
+ instance.hostname = hostname
+ instance.key_data = key_data
+ instance.architecture = architecture
+ instance.system_metadata = get_create_system_metadata(
+ self.context, instance_type_id)
+ instance.create()
+ else:
+ instance = objects.Instance.get_by_id(self.context, instance_id)
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ if empty_dns:
+ # NOTE(tr3buchet): this is a terrible way to do this...
+ network_info[0]['network']['subnets'][0]['dns'] = []
+
+ image_meta = {}
+ if image_ref:
+ image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
+ self.conn.spawn(self.context, instance, image_meta, injected_files,
+ 'herp', network_info, block_device_info)
+ self.create_vm_record(self.conn, os_type, instance['name'])
+ self.check_vm_record(self.conn, instance_type_id, check_injection)
+ self.assertEqual(instance['os_type'], os_type)
+ self.assertEqual(instance['architecture'], architecture)
+
+ def test_spawn_ipxe_iso_success(self):
+ self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
+ vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
+
+ self.flags(ipxe_network_name='test1',
+ ipxe_boot_menu_url='http://boot.example.com',
+ ipxe_mkisofs_cmd='/root/mkisofs',
+ group='xenserver')
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
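+        # Expect the ipxe plugin's 'inject' call with the SR path, the boot
+        # menu URL and the VM's ip/netmask/gateway/dns from the fake
+        # network, plus the configured mkisofs command.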
+ self.conn._session.call_plugin_serialized(
+ 'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
+ 'http://boot.example.com', '192.168.1.100', '255.255.255.0',
+ '192.168.1.1', '192.168.1.3', '/root/mkisofs')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_ipxe_iso_no_network_name(self):
+ self.flags(ipxe_network_name=None,
+ ipxe_boot_menu_url='http://boot.example.com',
+ group='xenserver')
+
+ # call_plugin_serialized shouldn't be called
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_ipxe_iso_no_boot_menu_url(self):
+ self.flags(ipxe_network_name='test1',
+ ipxe_boot_menu_url=None,
+ group='xenserver')
+
+ # call_plugin_serialized shouldn't be called
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_ipxe_iso_unknown_network_name(self):
+ self.flags(ipxe_network_name='test2',
+ ipxe_boot_menu_url='http://boot.example.com',
+ group='xenserver')
+
+ # call_plugin_serialized shouldn't be called
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_empty_dns(self):
+ # Test spawning with an empty dns list.
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ empty_dns=True)
+ self.check_vm_params_for_linux()
+
+ def test_spawn_not_enough_memory(self):
+ self.assertRaises(exception.InsufficientFreeMemory,
+ self._test_spawn,
+ '1', 2, 3, "4") # m1.xlarge
+
+ def test_spawn_fail_cleanup_1(self):
+ """Simulates an error while downloading an image.
+
+ Verifies that the VM and VDIs created are properly cleaned up.
+ """
+ vdi_recs_start = self._list_vdis()
+ start_vms = self._list_vms()
+ stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, '1', 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ end_vms = self._list_vms()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+ # No additional VMs should be found.
+ self.assertEqual(start_vms, end_vms)
+
+ def test_spawn_fail_cleanup_2(self):
+ """Simulates an error while creating VM record.
+
+ Verifies that the VM and VDIs created are properly cleaned up.
+ """
+ vdi_recs_start = self._list_vdis()
+ start_vms = self._list_vms()
+ stubs.stubout_create_vm(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, '1', 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ end_vms = self._list_vms()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+ # No additional VMs should be found.
+ self.assertEqual(start_vms, end_vms)
+
+ def test_spawn_fail_cleanup_3(self):
+ """Simulates an error while attaching disks.
+
+ Verifies that the VM and VDIs created are properly cleaned up.
+ """
+ stubs.stubout_attach_disks(self.stubs)
+ vdi_recs_start = self._list_vdis()
+ start_vms = self._list_vms()
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, '1', 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ end_vms = self._list_vms()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+ # No additional VMs should be found.
+ self.assertEqual(start_vms, end_vms)
+
+ def test_spawn_raw_glance(self):
+ self._test_spawn(IMAGE_RAW, None, None, os_type=None)
+ self.check_vm_params_for_windows()
+
+ def test_spawn_vhd_glance_linux(self):
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_windows(self):
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="windows", architecture="i386",
+ instance_type_id=5)
+ self.check_vm_params_for_windows()
+
+ def test_spawn_iso_glance(self):
+ self._test_spawn(IMAGE_ISO, None, None,
+ os_type="windows", architecture="i386")
+ self.check_vm_params_for_windows()
+
+ def test_spawn_glance(self):
+
+ def fake_fetch_disk_image(context, session, instance, name_label,
+ image_id, image_type):
+ sr_ref = vm_utils.safe_find_sr(session)
+ image_type_str = vm_utils.ImageType.to_string(image_type)
+ vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
+ name_label, image_type_str, "20")
+ vdi_role = vm_utils.ImageType.get_role(image_type)
+ vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
+ return {vdi_role: dict(uuid=vdi_uuid, file=None)}
+ self.stubs.Set(vm_utils, '_fetch_disk_image',
+ fake_fetch_disk_image)
+
+ self._test_spawn(IMAGE_MACHINE,
+ IMAGE_KERNEL,
+ IMAGE_RAMDISK)
+ self.check_vm_params_for_linux_with_external_kernel()
+
+ def test_spawn_boot_from_volume_no_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(None, None, None,
+ block_device_info=dev_info)
+
+ def test_spawn_boot_from_volume_no_glance_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(IMAGE_FROM_VOLUME, None, None,
+ block_device_info=dev_info)
+
+ def test_spawn_boot_from_volume_with_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(IMAGE_VHD, None, None,
+ block_device_info=dev_info)
+
+ def test_spawn_netinject_file(self):
+ self.flags(flat_injected=True)
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _tee_handler(cmd, **kwargs):
+ actual = kwargs.get('process_input', None)
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 192.168.1.100
+ netmask 255.255.255.0
+ broadcast 192.168.1.255
+ gateway 192.168.1.1
+ dns-nameservers 192.168.1.3 192.168.1.4
+iface eth0 inet6 static
+ address 2001:db8:0:1:dcad:beff:feef:1
+ netmask 64
+ gateway 2001:db8:0:1::1
+"""
+ self.assertEqual(expected, actual)
+ self._tee_executed = True
+ return '', ''
+
+ def _readlink_handler(cmd_parts, **kwargs):
+ return os.path.realpath(cmd_parts[2]), ''
+
+ fake_processutils.fake_execute_set_repliers([
+ # Capture the tee .../etc/network/interfaces command
+ (r'tee.*interfaces', _tee_handler),
+ (r'readlink -nm.*', _readlink_handler),
+ ])
+ self._test_spawn(IMAGE_MACHINE,
+ IMAGE_KERNEL,
+ IMAGE_RAMDISK,
+ check_injection=True)
+ self.assertTrue(self._tee_executed)
+
+ def test_spawn_netinject_xenstore(self):
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # When mounting, create real files under the mountpoint to simulate
+ # files in the mounted filesystem
+
+            # The mount point is the last item in the command list
+ self._tmpdir = cmd[len(cmd) - 1]
+ LOG.debug('Creating files in %s to simulate guest agent',
+ self._tmpdir)
+ os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ # Touch the file using open
+ open(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'), 'w').close()
+ return '', ''
+
+ def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # Umount would normally make files in the mounted filesystem
+ # disappear, so do that here
+ LOG.debug('Removing simulated guest agent files in %s',
+ self._tmpdir)
+ os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr'))
+ return '', ''
+
+ def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
+ self._tee_executed = True
+ return '', ''
+
+ fake_processutils.fake_execute_set_repliers([
+ (r'mount', _mount_handler),
+ (r'umount', _umount_handler),
+ (r'tee.*interfaces', _tee_handler)])
+ self._test_spawn('1', 2, 3, check_injection=True)
+
+ # tee must not run in this case, where an injection-capable
+ # guest agent is detected
+ self.assertFalse(self._tee_executed)
+
+ def test_spawn_injects_auto_disk_config_to_xenstore(self):
+ instance = self._create_instance(spawn=False)
+ self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
+ self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.spawn(self.context, instance,
+                        IMAGE_FIXTURES[IMAGE_MACHINE]["image_meta"], [], 'herp', '')
+
+ def test_spawn_vlanmanager(self):
+ self.flags(network_manager='nova.network.manager.VlanManager',
+ vlan_interface='fake0')
+
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
+ # Reset network table
+ xenapi_fake.reset_table('network')
+ # Instance id = 2 will use vlan network (see db/fakes.py)
+ ctxt = self.context.elevated()
+ self.network.conductor_api = conductor_api.LocalAPI()
+ self._create_instance(2, False)
+ networks = self.network.db.network_get_all(ctxt)
+ with mock.patch('nova.objects.network.Network._from_db_object'):
+ for network in networks:
+ self.network.set_network_host(ctxt, network)
+
+ self.network.allocate_for_instance(ctxt,
+ instance_id=2,
+ instance_uuid='00000000-0000-0000-0000-000000000002',
+ host=CONF.host,
+ vpn=None,
+ rxtx_factor=3,
+ project_id=self.project_id,
+ macs=None)
+ self._test_spawn(IMAGE_MACHINE,
+ IMAGE_KERNEL,
+ IMAGE_RAMDISK,
+ instance_id=2,
+ create_record=False)
+        # TODO(salvatore-orlando): a complete test here would require
+        # checking that the bridge for the VM's VIF is consistent with
+        # the bridge specified in the nova db
+
+ def test_spawn_with_network_qos(self):
+ self._create_instance()
+ for vif_ref in xenapi_fake.get_all('VIF'):
+ vif_rec = xenapi_fake.get_record('VIF', vif_ref)
+ self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
+ self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
+ str(3 * 10 * 1024))
+
+ def test_spawn_ssh_key_injection(self):
+ # Test spawning with key_data on an instance. Should use
+ # agent file injection.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ def fake_encrypt_text(sshkey, new_pass):
+ self.assertEqual("ssh-rsa fake_keydata", sshkey)
+ return "fake"
+
+ self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
+
+ expected_data = ('\n# The following ssh key was injected by '
+ 'Nova\nssh-rsa fake_keydata\n')
+
+ injected_files = [('/root/.ssh/authorized_keys', expected_data)]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ key_data='ssh-rsa fake_keydata')
+ self.assertEqual(actual_injected_files, injected_files)
+
+ def test_spawn_ssh_key_injection_non_rsa(self):
+ # Test spawning with key_data on an instance. Should use
+ # agent file injection.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ def fake_encrypt_text(sshkey, new_pass):
+ raise NotImplementedError("Should not be called")
+
+ self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
+
+ expected_data = ('\n# The following ssh key was injected by '
+ 'Nova\nssh-dsa fake_keydata\n')
+
+ injected_files = [('/root/.ssh/authorized_keys', expected_data)]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ key_data='ssh-dsa fake_keydata')
+ self.assertEqual(actual_injected_files, injected_files)
+
+ def test_spawn_injected_files(self):
+ # Test spawning with injected_files.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ injected_files = [('/tmp/foo', 'foobar')]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ injected_files=injected_files)
+ self.check_vm_params_for_linux()
+ self.assertEqual(actual_injected_files, injected_files)
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_spawn_agent_upgrade(self, mock_get):
+ self.flags(use_agent_default=True,
+ group='xenserver')
+
+ mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf",
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False,
+ 'id': 1}
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_spawn_agent_upgrade_fails_silently(self, mock_get):
+ mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf",
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False,
+ 'id': 1}
+
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ method="_plugin_agent_agentupdate", failure="fake_error")
+
+ def test_spawn_with_resetnetwork_alternative_returncode(self):
+ self.flags(use_agent_default=True,
+ group='xenserver')
+
+ def fake_resetnetwork(self, method, args):
+ fake_resetnetwork.called = True
+ # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
+ return jsonutils.dumps({'returncode': '500',
+ 'message': 'success'})
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_resetnetwork', fake_resetnetwork)
+ fake_resetnetwork.called = False
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+ self.assertTrue(fake_resetnetwork.called)
+
+ def _test_spawn_fails_silently_with(self, expected_exception_cls,
+ method="_plugin_agent_version",
+ failure=None, value=None):
+ self.flags(use_agent_default=True,
+ agent_version_timeout=0,
+ group='xenserver')
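+
+        # agent_version_timeout=0 avoids waiting on the stubbed agent
+        # handshake: spawn should swallow the agent error and record an
+        # instance fault rather than raising.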
+
+ def fake_agent_call(self, method, args):
+ if failure:
+ raise xenapi_fake.Failure([failure])
+ else:
+ return value
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ method, fake_agent_call)
+
+ called = {}
+
+ def fake_add_instance_fault(*args, **kwargs):
+ called["fake_add_instance_fault"] = args[2]
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ fake_add_instance_fault)
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+ actual_exception = called["fake_add_instance_fault"]
+ self.assertIsInstance(actual_exception, expected_exception_cls)
+
+ def test_spawn_fails_silently_with_agent_timeout(self):
+ self._test_spawn_fails_silently_with(exception.AgentTimeout,
+ failure="TIMEOUT:fake")
+
+ def test_spawn_fails_silently_with_agent_not_implemented(self):
+ self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
+ failure="NOT IMPLEMENTED:fake")
+
+ def test_spawn_fails_silently_with_agent_error(self):
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ failure="fake_error")
+
+ def test_spawn_fails_silently_with_agent_bad_return(self):
+ error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ value=error)
+
+ def test_rescue(self):
+ instance = self._create_instance(spawn=False)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+
+ session = get_session()
+ vm_ref = vm_utils.lookup(session, instance['name'])
+
+ swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
+ root_vdi_ref = xenapi_fake.create_vdi('root', None)
+ eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
+ eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
+ vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
+
+ xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
+ xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
+ xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
+ xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
+ xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
+ other_config={'osvol': True})
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ image_meta = {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'}
+ conn.rescue(self.context, instance, [], image_meta, '')
+
+ vm = xenapi_fake.get_record('VM', vm_ref)
+ rescue_name = "%s-rescue" % vm["name_label"]
+ rescue_ref = vm_utils.lookup(session, rescue_name)
+ rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
+
+ vdi_refs = {}
+ for vbd_ref in rescue_vm['VBDs']:
+ vbd = xenapi_fake.get_record('VBD', vbd_ref)
+ vdi_refs[vbd['VDI']] = vbd['userdevice']
+
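+        # The rescue VM boots from the rescue image at userdevice 0, so the
+        # original root disk is re-attached at userdevice 1; swap and
+        # ephemeral disks keep their slots and the cinder volume (osvol)
+        # must not be attached at all.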
+ self.assertEqual('1', vdi_refs[root_vdi_ref])
+ self.assertEqual('2', vdi_refs[swap_vdi_ref])
+ self.assertEqual('4', vdi_refs[eph1_vdi_ref])
+ self.assertEqual('5', vdi_refs[eph2_vdi_ref])
+ self.assertNotIn(vol_vdi_ref, vdi_refs)
+
+ def test_rescue_preserve_disk_on_failure(self):
+ # test that the original disk is preserved if rescue setup fails
+ # bug #1227898
+ instance = self._create_instance()
+ session = get_session()
+ image_meta = {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'}
+
+ vm_ref = vm_utils.lookup(session, instance['name'])
+ vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
+
+ # raise an error in the spawn setup process and trigger the
+ # undo manager logic:
+ def fake_start(*args, **kwargs):
+ raise test.TestingException('Start Error')
+
+ self.stubs.Set(self.conn._vmops, '_start', fake_start)
+
+ self.assertRaises(test.TestingException, self.conn.rescue,
+ self.context, instance, [], image_meta, '')
+
+ # confirm original disk still exists:
+ vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
+ self.assertEqual(vdi_ref, vdi_ref2)
+ self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
+
+ def test_unrescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ # Unrescue expects the original instance to be powered off
+ conn.power_off(instance)
+ xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
+ conn.unrescue(instance, None)
+
+ def test_unrescue_not_in_rescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ # Ensure that it will not unrescue a non-rescued instance.
+ self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
+ instance, None)
+
+ def test_finish_revert_migration(self):
+ instance = self._create_instance()
+
+        class VMOpsMock(object):
+
+ def __init__(self):
+ self.finish_revert_migration_called = False
+
+ def finish_revert_migration(self, context, instance, block_info,
+ power_on):
+ self.finish_revert_migration_called = True
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn._vmops = VMOpsMock()
+ conn.finish_revert_migration(self.context, instance, None)
+ self.assertTrue(conn._vmops.finish_revert_migration_called)
+
+ def test_reboot_hard(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.reboot(self.context, instance, None, "HARD")
+
+ def test_poll_rebooting_instances(self):
+ self.mox.StubOutWithMock(compute_api.API, 'reboot')
+ compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ instance = self._create_instance()
+ instances = [instance]
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.poll_rebooting_instances(60, instances)
+
+ def test_reboot_soft(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_reboot_halted(self):
+ session = get_session()
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Halted')
+ conn.reboot(self.context, instance, None, "SOFT")
+ vm_ref = vm_utils.lookup(session, instance['name'])
+ vm = xenapi_fake.get_record('VM', vm_ref)
+ self.assertEqual(vm['power_state'], 'Running')
+
+ def test_reboot_unknown_state(self):
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Unknown')
+ self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
+ instance, None, "SOFT")
+
+ def test_reboot_rescued(self):
+ instance = self._create_instance()
+ instance['vm_state'] = vm_states.RESCUED
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ real_result = vm_utils.lookup(conn._session, instance['name'])
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(conn._session, instance['name'],
+ True).AndReturn(real_result)
+ self.mox.ReplayAll()
+
+ conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_get_console_output_succeeds(self):
+
+ def fake_get_console_output(instance):
+ self.assertEqual("instance", instance)
+ return "console_log"
+ self.stubs.Set(self.conn._vmops, 'get_console_output',
+ fake_get_console_output)
+
+ self.assertEqual(self.conn.get_console_output('context', "instance"),
+ "console_log")
+
+ def _test_maintenance_mode(self, find_host, find_aggregate):
+ real_call_xenapi = self.conn._session.call_xenapi
+ instance = self._create_instance(spawn=True)
+ api_calls = {}
+
+ # Record all the xenapi calls, and return a fake list of hosts
+ # for the host.get_all call
+ def fake_call_xenapi(method, *args):
+ api_calls[method] = args
+ if method == 'host.get_all':
+ return ['foo', 'bar', 'baz']
+ return real_call_xenapi(method, *args)
+ self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
+
+ def fake_aggregate_get(context, host, key):
+ if find_aggregate:
+ return [test_aggregate.fake_aggregate]
+ else:
+ return []
+ self.stubs.Set(db, 'aggregate_get_by_host',
+ fake_aggregate_get)
+
+ def fake_host_find(context, session, src, dst):
+ if find_host:
+ return 'bar'
+ else:
+ raise exception.NoValidHost("I saw this one coming...")
+ self.stubs.Set(host, '_host_find', fake_host_find)
+
+ result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
+ self.assertEqual(result, 'on_maintenance')
+
+        # We expect VM.pool_migrate to have been called to migrate our
+        # instance to the 'bar' host
+ vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
+ host_ref = "foo"
+ expected = (vm_ref, host_ref, {"live": "true"})
+ self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
+ def test_maintenance_mode(self):
+ self._test_maintenance_mode(True, True)
+
+ def test_maintenance_mode_no_host(self):
+ self.assertRaises(exception.NoValidHost,
+ self._test_maintenance_mode, False, True)
+
+ def test_maintenance_mode_no_aggregate(self):
+ self.assertRaises(exception.NotFound,
+ self._test_maintenance_mode, True, False)
+
+ def test_uuid_find(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ fake_inst = fake_instance.fake_db_instance(id=123)
+ fake_inst2 = fake_instance.fake_db_instance(id=456)
+ db.instance_get_all_by_host(self.context, fake_inst['host'],
+ columns_to_join=None,
+ use_slave=False
+ ).AndReturn([fake_inst, fake_inst2])
+ self.mox.ReplayAll()
+ expected_name = CONF.instance_name_template % fake_inst['id']
+ inst_uuid = host._uuid_find(self.context, fake_inst['host'],
+ expected_name)
+ self.assertEqual(inst_uuid, fake_inst['uuid'])
+
+ def test_session_virtapi(self):
+ was = {'called': False}
+
+ def fake_aggregate_get_by_host(self, *args, **kwargs):
+ was['called'] = True
+ raise test.TestingException()
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+
+ self.stubs.Set(self.conn._session, "is_slave", True)
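+
+        # With is_slave set, _get_host_uuid must look the uuid up via the
+        # aggregate DB API, so the stubbed call above has to be hit.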
+
+ self.assertRaises(test.TestingException,
+ self.conn._session._get_host_uuid)
+ self.assertTrue(was['called'])
+
+ def test_per_instance_usage_running(self):
+ instance = self._create_instance(spawn=True)
+ flavor = flavors.get_flavor(3)
+
+ expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
+ 'uuid': instance['uuid']}}
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ # Paused instances still consume resources:
+ self.conn.pause(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ def test_per_instance_usage_suspended(self):
+ # Suspended instances do not consume memory:
+ instance = self._create_instance(spawn=True)
+ self.conn.suspend(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def test_per_instance_usage_halted(self):
+ instance = self._create_instance(spawn=True)
+ self.conn.power_off(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
+ """Creates and spawns a test instance."""
+ instance_values = {
+ 'id': instance_id,
+ 'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
+ 'display_name': 'host-%d' % instance_id,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'vm_mode': 'hvm',
+ 'architecture': 'x86-64'}
+ instance_values.update(attrs)
+
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'}
+ if spawn:
+ self.conn.spawn(self.context, instance, image_meta, [], 'herp',
+ network_info)
+ if obj:
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+ return instance
+
+ def test_destroy_clean_up_kernel_and_ramdisk(self):
+ def fake_lookup_kernel_ramdisk(session, vm_ref):
+ return "kernel", "ramdisk"
+
+ self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
+ fake_lookup_kernel_ramdisk)
+
+ def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
+ fake_destroy_kernel_ramdisk.called = True
+ self.assertEqual("kernel", kernel)
+ self.assertEqual("ramdisk", ramdisk)
+
+ fake_destroy_kernel_ramdisk.called = False
+
+ self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
+ fake_destroy_kernel_ramdisk)
+
+ instance = self._create_instance(spawn=True)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ self.conn.destroy(self.context, instance, network_info)
+
+ vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
+ self.assertIsNone(vm_ref)
+ self.assertTrue(fake_destroy_kernel_ramdisk.called)
+
+
+class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
+ """Unit tests for Diffie-Hellman code."""
+ def setUp(self):
+ super(XenAPIDiffieHellmanTestCase, self).setUp()
+ self.alice = agent.SimpleDH()
+ self.bob = agent.SimpleDH()
+
+ def test_shared(self):
+ alice_pub = self.alice.get_public()
+ bob_pub = self.bob.get_public()
+ alice_shared = self.alice.compute_shared(bob_pub)
+ bob_shared = self.bob.compute_shared(alice_pub)
+ self.assertEqual(alice_shared, bob_shared)
+
+ def _test_encryption(self, message):
+ enc = self.alice.encrypt(message)
+ self.assertFalse(enc.endswith('\n'))
+ dec = self.bob.decrypt(enc)
+ self.assertEqual(dec, message)
+
+ def test_encrypt_simple_message(self):
+ self._test_encryption('This is a simple message.')
+
+ def test_encrypt_message_with_newlines_at_end(self):
+ self._test_encryption('This message has a newline at the end.\n')
+
+ def test_encrypt_many_newlines_at_end(self):
+ self._test_encryption('Message with lotsa newlines.\n\n\n')
+
+ def test_encrypt_newlines_inside_message(self):
+ self._test_encryption('Message\nwith\ninterior\nnewlines.')
+
+ def test_encrypt_with_leading_newlines(self):
+ self._test_encryption('\n\nMessage with leading newlines.')
+
+ def test_encrypt_really_long_message(self):
+ self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIMigrateInstance(stubs.XenAPITestBase):
+ """Unit test for verifying migration-related actions."""
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(XenAPIMigrateInstance, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ xenapi_fake.create_network('fake', 'fake_br1')
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.instance_values = {'id': 1,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': None,
+ 'ramdisk_id': None,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
+
+ migration_values = {
+ 'source_compute': 'nova-compute',
+ 'dest_compute': 'nova-compute',
+ 'dest_host': '10.127.5.114',
+ 'status': 'post-migrating',
+ 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 1
+ }
+ self.migration = db.migration_create(
+ context.get_admin_context(), migration_values)
+
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ stubs.stub_out_migration_methods(self.stubs)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ def test_migrate_disk_and_power_off(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = {"root_gb": 80, 'ephemeral_gb': 0}
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.migrate_disk_and_power_off(self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_passes_exceptions(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = {"root_gb": 80, 'ephemeral_gb': 0}
+
+ def fake_raise(*args, **kwargs):
+ raise exception.MigrationError(reason='test failure')
+ self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.MigrationError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ flavor = {"root_gb": 0, 'ephemeral_gb': 0}
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ResizeError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ 'fake_dest', flavor, None)
+
+ def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
+ flavor = {"root_gb": 0, 'ephemeral_gb': 0}
+ values = copy.copy(self.instance_values)
+ values["root_gb"] = 0
+ values["ephemeral_gb"] = 0
+ instance = db.instance_create(self.context, values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.migrate_disk_and_power_off(self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def _test_revert_migrate(self, power_on):
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+ self.called = False
+ self.fake_vm_start_called = False
+ self.fake_finish_revert_migration_called = False
+ context = 'fake_context'
+
+ def fake_vm_start(*args, **kwargs):
+ self.fake_vm_start_called = True
+
+ def fake_vdi_resize(*args, **kwargs):
+ self.called = True
+
+ def fake_finish_revert_migration(*args, **kwargs):
+ self.fake_finish_revert_migration_called = True
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
+ self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
+ fake_finish_revert_migration)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
+ product_version=(4, 0, 0),
+ product_brand='XenServer')
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ base = xenapi_fake.create_vdi('hurr', 'fake')
+ base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
+ cow = xenapi_fake.create_vdi('durr', 'fake')
+ cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy=base_uuid, cow=cow_uuid),
+ network_info, image_meta, resize_instance=True,
+ block_device_info=None, power_on=power_on)
+ self.assertEqual(self.called, True)
+ self.assertEqual(self.fake_vm_start_called, power_on)
+
+ conn.finish_revert_migration(context, instance, network_info)
+ self.assertEqual(self.fake_finish_revert_migration_called, True)
+
+ def test_revert_migrate_power_on(self):
+ self._test_revert_migrate(True)
+
+ def test_revert_migrate_power_off(self):
+ self._test_revert_migrate(False)
+
+ def _test_finish_migrate(self, power_on):
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+ self.called = False
+ self.fake_vm_start_called = False
+
+ def fake_vm_start(*args, **kwargs):
+ self.fake_vm_start_called = True
+
+ def fake_vdi_resize(*args, **kwargs):
+ self.called = True
+
+ self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
+ product_version=(4, 0, 0),
+ product_brand='XenServer')
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=True,
+ block_device_info=None, power_on=power_on)
+ self.assertEqual(self.called, True)
+ self.assertEqual(self.fake_vm_start_called, power_on)
+
+ def test_finish_migrate_power_on(self):
+ self._test_finish_migrate(True)
+
+ def test_finish_migrate_power_off(self):
+ self._test_finish_migrate(False)
+
+ def test_finish_migrate_no_local_storage(self):
+ values = copy.copy(self.instance_values)
+ values["root_gb"] = 0
+ values["ephemeral_gb"] = 0
+ instance = create_instance_with_system_metadata(self.context, values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=True)
+
+ def test_finish_migrate_no_resize_vdi(self):
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ # Resize instance would be determined by the compute call
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_migrate_too_many_partitions_no_resize_down(self):
+ instance_values = self.instance_values
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = db.flavor_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_partitions(partition):
+ return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
+
+ self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
+
+ self.assertRaises(exception.InstanceFaultRollback,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_migrate_bad_fs_type_no_resize_down(self):
+ instance_values = self.instance_values
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = db.flavor_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_partitions(partition):
+ return [(1, 2, 3, "ext2", "", "boot")]
+
+ self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
+
+ self.assertRaises(exception.InstanceFaultRollback,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_rollback_when_resize_down_fs_fails(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+
+ self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
+ self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
+ self.mox.StubOutWithMock(vm_utils, 'resize_disk')
+ self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
+ self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
+ self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
+ self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
+
+ instance = objects.Instance(context=self.context,
+ auto_disk_config=True, uuid='uuid')
+ instance.obj_reset_changes()
+ vm_ref = "vm_ref"
+ dest = "dest"
+ flavor = "type"
+ sr_path = "sr_path"
+
+ vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
+ vmops._apply_orig_vm_name_label(instance, vm_ref)
+ old_vdi_ref = "old_ref"
+ vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
+ (old_vdi_ref, None))
+ new_vdi_ref = "new_ref"
+ new_vdi_uuid = "new_uuid"
+ vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
+ flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
+ vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
+ sr_path, 0).AndRaise(
+ exception.ResizeError(reason="asdf"))
+
+ vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
+ vmops._restore_orig_vm_and_cleanup_orphan(instance)
+
+ self.mox.ReplayAll()
+
+ with mock.patch.object(instance, 'save') as mock_save:
+ self.assertRaises(exception.InstanceFaultRollback,
+ vmops._migrate_disk_resizing_down, self.context,
+ instance, dest, flavor, vm_ref, sr_path)
+ self.assertEqual(3, mock_save.call_count)
+ self.assertEqual(60.0, instance.progress)
+
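+ # The mox expectations above pin down the rollback path expected of
+ # _migrate_disk_resizing_down; a sketch of that control flow, inferred
+ # from this test rather than quoted from vmops.py:
+ #
+ #   ensure the VM is shut down -> restore the original name-label
+ #   -> look up the root VDI -> resize it down -> migrate_vhd (fails
+ #   here) -> destroy the new VDI -> restore the original VM and clean
+ #   up the orphan -> re-raise as InstanceFaultRollback
+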
+ def test_resize_ensure_vm_is_shutdown_cleanly(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+ vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+ def test_resize_ensure_vm_is_shutdown_forced(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+ vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(False)
+ vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+ def test_resize_ensure_vm_is_shutdown_fails(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+ vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(False)
+ vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.ResizeError,
+ vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
+
+ def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
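+# The four shutdown tests above imply the following control flow for
+# VMOps._resize_ensure_vm_is_shutdown (a sketch inferred from the mox
+# expectations, not the actual vmops.py source):
+#
+#     def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
+#         if vm_utils.is_vm_shutdown(self._session, vm_ref):
+#             return  # already shut down: nothing to do
+#         if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
+#             # fall back to a hard shutdown when the clean one fails
+#             if not vm_utils.hard_shutdown_vm(self._session, instance,
+#                                              vm_ref):
+#                 raise exception.ResizeError(
+#                     reason="Unable to shut down the VM for resize")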
+
+class XenAPIImageTypeTestCase(test.NoDBTestCase):
+ """Test ImageType class."""
+
+ def test_to_string(self):
+ # Can convert from type id to type string.
+ self.assertEqual(
+ vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
+ vm_utils.ImageType.KERNEL_STR)
+
+ def _assert_role(self, expected_role, image_type_id):
+ self.assertEqual(
+ expected_role,
+ vm_utils.ImageType.get_role(image_type_id))
+
+ def test_get_image_role_kernel(self):
+ self._assert_role('kernel', vm_utils.ImageType.KERNEL)
+
+ def test_get_image_role_ramdisk(self):
+ self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
+
+ def test_get_image_role_disk(self):
+ self._assert_role('root', vm_utils.ImageType.DISK)
+
+ def test_get_image_role_disk_raw(self):
+ self._assert_role('root', vm_utils.ImageType.DISK_RAW)
+
+ def test_get_image_role_disk_vhd(self):
+ self._assert_role('root', vm_utils.ImageType.DISK_VHD)
+
+
+class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
+ """Unit tests for code that detects the ImageType."""
+ def assert_disk_type(self, image_meta, expected_disk_type):
+ actual = vm_utils.determine_disk_image_type(image_meta)
+ self.assertEqual(expected_disk_type, actual)
+
+ def test_machine(self):
+ image_meta = {'id': 'a', 'disk_format': 'ami'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
+
+ def test_raw(self):
+ image_meta = {'id': 'a', 'disk_format': 'raw'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
+
+ def test_vhd(self):
+ image_meta = {'id': 'a', 'disk_format': 'vhd'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
+
+ def test_none(self):
+ image_meta = None
+ self.assert_disk_type(image_meta, None)
+
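+# The mapping exercised above, as implied by the tests (the authoritative
+# table lives in vm_utils.determine_disk_image_type):
+#
+#   disk_format 'ami' -> ImageType.DISK
+#   disk_format 'raw' -> ImageType.DISK_RAW
+#   disk_format 'vhd' -> ImageType.DISK_VHD
+#   no image_meta     -> None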
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIHostTestCase(stubs.XenAPITestBase):
+ """Tests HostState, which holds metrics from XenServer that get
+ reported back to the Schedulers.
+ """
+
+ def setUp(self):
+ super(XenAPIHostTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.context = context.get_admin_context()
+ self.flags(use_local=True, group='conductor')
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.instance = fake_instance.fake_db_instance(name='foo')
+
+ def test_host_state(self):
+ stats = self.conn.host_state.get_host_stats(False)
+ # Values from fake.create_local_srs (ext SR)
+ self.assertEqual(stats['disk_total'], 40000)
+ self.assertEqual(stats['disk_used'], 20000)
+ # Values from fake._plugin_xenhost_host_data
+ self.assertEqual(stats['host_memory_total'], 10)
+ self.assertEqual(stats['host_memory_overhead'], 20)
+ self.assertEqual(stats['host_memory_free'], 30)
+ self.assertEqual(stats['host_memory_free_computed'], 40)
+ self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
+ self.assertThat({'cpu_count': 50},
+ matchers.DictMatches(stats['host_cpu_info']))
+ # No VMs running
+ self.assertEqual(stats['vcpus_used'], 0)
+
+ def test_host_state_vcpus_used(self):
+ stats = self.conn.host_state.get_host_stats(True)
+ self.assertEqual(stats['vcpus_used'], 0)
+ xenapi_fake.create_vm(self.instance['name'], 'Running')
+ stats = self.conn.host_state.get_host_stats(True)
+ self.assertEqual(stats['vcpus_used'], 4)
+
+ def test_pci_passthrough_devices_whitelist(self):
+ # NOTE(guillaume-thouvenin): This PCI whitelist is matched against the
+ # device returned by _plugin_xenhost_get_pci_device_details in fake.py.
+ white_list = '{"vendor_id":"10de", "product_id":"11bf"}'
+ self.flags(pci_passthrough_whitelist=[white_list])
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertEqual(len(stats['pci_passthrough_devices']), 1)
+
+ def test_pci_passthrough_devices_no_whitelist(self):
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertEqual(len(stats['pci_passthrough_devices']), 0)
+
+ def test_host_state_missing_sr(self):
+ # Must trigger construction of 'host_state' property
+ # before introducing the stub which raises the error
+ hs = self.conn.host_state
+
+ def fake_safe_find_sr(session):
+ raise exception.StorageRepositoryNotFound('not there')
+
+ self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
+ self.assertRaises(exception.StorageRepositoryNotFound,
+ hs.get_host_stats,
+ refresh=True)
+
+ def _test_host_action(self, method, action, expected=None):
+ result = method('host', action)
+ if not expected:
+ expected = action
+ self.assertEqual(result, expected)
+
+ def test_host_reboot(self):
+ self._test_host_action(self.conn.host_power_action, 'reboot')
+
+ def test_host_shutdown(self):
+ self._test_host_action(self.conn.host_power_action, 'shutdown')
+
+ def test_host_startup(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.host_power_action, 'host', 'startup')
+
+ def test_host_maintenance_on(self):
+ self._test_host_action(self.conn.host_maintenance_mode,
+ True, 'on_maintenance')
+
+ def test_host_maintenance_off(self):
+ self._test_host_action(self.conn.host_maintenance_mode,
+ False, 'off_maintenance')
+
+ def test_set_enable_host_enable(self):
+ _create_service_entries(self.context, values={'nova': ['fake-mini']})
+ self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
+ service = db.service_get_by_args(self.context, 'fake-mini',
+ 'nova-compute')
+ self.assertEqual(service.disabled, False)
+
+ def test_set_enable_host_disable(self):
+ _create_service_entries(self.context, values={'nova': ['fake-mini']})
+ self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
+ service = db.service_get_by_args(self.context, 'fake-mini',
+ 'nova-compute')
+ self.assertEqual(service.disabled, True)
+
+ def test_get_host_uptime(self):
+ result = self.conn.get_host_uptime('host')
+ self.assertEqual(result, 'fake uptime')
+
+ def test_supported_instances_is_included_in_host_state(self):
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertIn('supported_instances', stats)
+
+ def test_supported_instances_is_calculated_by_to_supported_instances(self):
+
+ def to_supported_instances(somedata):
+ self.assertIsNone(somedata)
+ return "SOMERETURNVALUE"
+ self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
+
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
+
+ def test_update_stats_caches_hostname(self):
+ self.mox.StubOutWithMock(host, 'call_xenhost')
+ self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
+ self.mox.StubOutWithMock(vm_utils, 'list_vms')
+ self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
+ data = {'disk_total': 0,
+ 'disk_used': 0,
+ 'disk_available': 0,
+ 'supported_instances': 0,
+ 'host_capabilities': [],
+ 'host_hostname': 'foo',
+ 'vcpus_used': 0,
+ }
+ sr_rec = {
+ 'physical_size': 0,
+ 'physical_utilisation': 0,
+ 'virtual_allocation': 0,
+ }
+
+ for i in range(3):
+ host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
+ vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
+ vm_utils.list_vms(self.conn._session).AndReturn([])
+ self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
+ sr_rec)
+ if i == 2:
+ # Change the hostname after recording the third expectation; the
+ # refreshes below must still report the cached 'foo'.
+ data = dict(data, host_hostname='bar')
+
+ self.mox.ReplayAll()
+ stats = self.conn.host_state.get_host_stats(refresh=True)
+ self.assertEqual('foo', stats['hypervisor_hostname'])
+ stats = self.conn.host_state.get_host_stats(refresh=True)
+ self.assertEqual('foo', stats['hypervisor_hostname'])
+
+
+class ToSupportedInstancesTestCase(test.NoDBTestCase):
+ def test_default_return_value(self):
+ self.assertEqual([],
+ host.to_supported_instances(None))
+
+ def test_return_value(self):
+ self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
+ host.to_supported_instances([u'xen-3.0-x86_64']))
+
+ def test_invalid_values_do_not_break(self):
+ self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
+ host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
+
+ def test_multiple_values(self):
+ self.assertEqual(
+ [
+ (arch.X86_64, hvtype.XEN, 'xen'),
+ (arch.I686, hvtype.XEN, 'hvm')
+ ],
+ host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
+ )
+
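+# A minimal sketch of the parsing these tests imply, assuming each
+# capability string has the shape '<mode>-<version>-<architecture>' and
+# that malformed or unknown entries are skipped (an illustration, not the
+# host.py source; arch.canonicalize is assumed to normalize names such as
+# 'x86_32' to I686):
+#
+#     def to_supported_instances(host_capabilities):
+#         if not host_capabilities:
+#             return []
+#         result = []
+#         for capability in host_capabilities:
+#             try:
+#                 mode, _version, guest_arch = capability.split('-')
+#                 result.append((arch.canonicalize(guest_arch),
+#                                hvtype.XEN, mode))
+#             except Exception:
+#                 pass  # a bad entry must not break the whole list
+#         return result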
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
+ def setUp(self):
+ super(XenAPIAutoDiskConfigTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+
+ self.instance_values = {'id': 1,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
+
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
+ vbd_type='disk', read_only=False, bootable=True,
+ osvol=False):
+ pass
+
+ self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
+
+ def assertIsPartitionCalled(self, called):
+ marker = {"partition_called": False}
+
+ def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
+ flags):
+ marker["partition_called"] = True
+ self.stubs.Set(vm_utils, "_resize_part_and_fs",
+ fake_resize_part_and_fs)
+
+ context.RequestContext(self.user_id, self.project_id)
+ session = get_session()
+
+ disk_image_type = vm_utils.ImageType.DISK_VHD
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+ self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
+ vdis, disk_image_type, "fake_nw_inf")
+
+ self.assertEqual(marker["partition_called"], called)
+
+ def test_instance_not_auto_disk_config(self):
+ """Should not partition unless instance is marked as
+ auto_disk_config.
+ """
+ self.instance_values['auto_disk_config'] = False
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_fails_safe_two_partitions(self):
+ # Should not partition unless fail safes pass.
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(1, 0, 100, 'ext4', "", ""), (2, 100, 200, 'ext4', "", "")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
+ # Should not partition unless fail safes pass.
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(2, 100, 200, 'ext4', "", "")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
+ # Should not partition unless fail safes pass.
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(1, 100, 200, 'asdf', "", "")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_passes_fail_safes(self):
+ """Should partition if instance is marked as auto_disk_config=True and
+ virt-layer specific fail-safe checks pass.
+ """
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(1, 0, 100, 'ext4', "", "boot")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(True)
+
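+# Taken together, the fail-safe tests above imply the checks that gate
+# auto-partitioning: exactly one partition, numbered 1, with a supported
+# filesystem type (ext4 in these fixtures). Any other layout skips the
+# resize instead of risking the guest's data.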
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIGenerateLocal(stubs.XenAPITestBase):
+ """Test generating of local disks, like swap and ephemeral."""
+ def setUp(self):
+ super(XenAPIGenerateLocal, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+
+ self.instance_values = {'id': 1,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
+
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
+ vbd_type='disk', read_only=False, bootable=True,
+ osvol=False, empty=False, unpluggable=True):
+ return session.call_xenapi('VBD.create', {'VM': vm_ref,
+ 'VDI': vdi_ref})
+
+ self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
+
+ def assertCalled(self, instance,
+ disk_image_type=vm_utils.ImageType.DISK_VHD):
+ context.RequestContext(self.user_id, self.project_id)
+ session = get_session()
+
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+
+ vdi_key = 'root'
+ if disk_image_type == vm_utils.ImageType.DISK_ISO:
+ vdi_key = 'iso'
+ vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+ self.called = False
+ self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
+ vdis, disk_image_type, "fake_nw_inf")
+ self.assertTrue(self.called)
+
+ def test_generate_swap(self):
+ # Test swap disk generation.
+ instance_values = dict(self.instance_values, instance_type_id=5)
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+
+ def fake_generate_swap(*args, **kwargs):
+ self.called = True
+ self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
+
+ self.assertCalled(instance)
+
+ def test_generate_ephemeral(self):
+ # Test ephemeral disk generation.
+ instance_values = dict(self.instance_values, instance_type_id=4)
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+
+ def fake_generate_ephemeral(*args):
+ self.called = True
+ self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+ self.assertCalled(instance)
+
+ def test_generate_iso_blank_root_disk(self):
+ instance_values = dict(self.instance_values, instance_type_id=4)
+ instance_values.pop('kernel_id')
+ instance_values.pop('ramdisk_id')
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+
+ def fake_generate_ephemeral(*args):
+ pass
+ self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+ def fake_generate_iso(*args):
+ self.called = True
+ self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
+ fake_generate_iso)
+
+ self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
+
+
+class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
+ FAKE_VMS = {'test1:ref': dict(name_label='test1',
+ other_config=dict(nova_uuid='hash'),
+ domid='12',
+ _vifmap={'0': "a:b:c:d...",
+ '1': "e:f:12:q..."}),
+ 'test2:ref': dict(name_label='test2',
+ other_config=dict(nova_uuid='hash'),
+ domid='42',
+ _vifmap={'0': "a:3:c:d...",
+ '1': "e:f:42:q..."}),
+ }
+
+ def setUp(self):
+ super(XenAPIBWCountersTestCase, self).setUp()
+ self.stubs.Set(vm_utils, 'list_vms',
+ XenAPIBWCountersTestCase._fake_list_vms)
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def _fake_get_vif_device_map(vm_rec):
+ return vm_rec['_vifmap']
+
+ self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
+ _fake_get_vif_device_map)
+
+ @classmethod
+ def _fake_list_vms(cls, session):
+ return cls.FAKE_VMS.iteritems()
+
+ @staticmethod
+ def _fake_fetch_bandwidth_mt(session):
+ return {}
+
+ @staticmethod
+ def _fake_fetch_bandwidth(session):
+ return {'42':
+ {'0': {'bw_in': 21024, 'bw_out': 22048},
+ '1': {'bw_in': 231337, 'bw_out': 221212121}},
+ '12':
+ {'0': {'bw_in': 1024, 'bw_out': 2048},
+ '1': {'bw_in': 31337, 'bw_out': 21212121}},
+ }
+
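+ # _fake_fetch_bandwidth keys counters by domid ('12', '42') and then by
+ # VIF device ('0', '1'); get_all_bw_counters is expected to join that
+ # against each VM's device map to yield one {uuid, mac_address, bw_in,
+ # bw_out} dict per (VM, VIF) pair, as the assertions below spell out.
+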
+ def test_get_all_bw_counters(self):
+ instances = [dict(name='test1', uuid='1-2-3'),
+ dict(name='test2', uuid='4-5-6')]
+
+ self.stubs.Set(vm_utils, 'fetch_bandwidth',
+ self._fake_fetch_bandwidth)
+ result = self.conn.get_all_bw_counters(instances)
+ self.assertEqual(len(result), 4)
+ self.assertIn(dict(uuid='1-2-3',
+ mac_address="a:b:c:d...",
+ bw_in=1024,
+ bw_out=2048), result)
+ self.assertIn(dict(uuid='1-2-3',
+ mac_address="e:f:12:q...",
+ bw_in=31337,
+ bw_out=21212121), result)
+
+ self.assertIn(dict(uuid='4-5-6',
+ mac_address="a:3:c:d...",
+ bw_in=21024,
+ bw_out=22048), result)
+ self.assertIn(dict(uuid='4-5-6',
+ mac_address="e:f:42:q...",
+ bw_in=231337,
+ bw_out=221212121), result)
+
+ def test_get_all_bw_counters_in_failure_case(self):
+ """Test that get_all_bw_conters returns an empty list when
+ no data returned from Xenserver. c.f. bug #910045.
+ """
+ instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
+
+ self.stubs.Set(vm_utils, 'fetch_bandwidth',
+ self._fake_fetch_bandwidth_mt)
+ result = self.conn.get_all_bw_counters(instances)
+ self.assertEqual(result, [])
+
+
+# TODO(salvatore-orlando): this class and
+# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
+# share a lot of code. Consider abstracting common code in a base
+# class for firewall driver testing.
+#
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
+
+ REQUIRES_LOCKING = True
+
+ _in_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*mangle',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ _in6_filter_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011',
+ ]
+
+ def setUp(self):
+ super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.user_id = 'mappin'
+ self.project_id = 'fake'
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
+ test_case=self)
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.network = importutils.import_object(CONF.network_manager)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.fw = self.conn._vmops.firewall_driver
+
+ def _create_instance_ref(self):
+ return db.instance_create(self.context,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'instance_type_id': 1})
+
+ def _create_test_security_group(self):
+ admin_ctxt = context.get_admin_context()
+ secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'name': 'testgroup',
+ 'description': 'test group'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': 8,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'cidr': '192.168.10.0/24'})
+ return secgroup
+
+ def _validate_security_group(self):
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self._in_rules)
+ for rule in in_rules:
+ if 'nova' not in rule:
+ self.assertTrue(rule in self._out_rules,
+ 'Rule went missing: %s' % rule)
+
+ instance_chain = None
+ for rule in self._out_rules:
+ # This is pretty crude, but it'll do for now
+ # last two octets change
+ if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+ security_group_chain = None
+ for rule in self._out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
+ ' -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "ICMP acceptance rule wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
+ ' --icmp-type 8 -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
+ ' -s 192.168.10.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
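+ # For reference, the kind of rule line the TCP regex above should match
+ # (illustrative only; the actual chain name and counters vary):
+ #   [0:0] -A inst-1 -j ACCEPT -p tcp --dport 80:81 -s 192.168.10.0/24
+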
+ def test_static_filters(self):
+ instance_ref = self._create_instance_ref()
+ src_instance_ref = self._create_instance_ref()
+ admin_ctxt = context.get_admin_context()
+ secgroup = self._create_test_security_group()
+
+ src_secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'name': 'testsourcegroup',
+ 'description': 'src group'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'group_id': src_secgroup['id']})
+
+ db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+ secgroup['id'])
+ db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
+ src_secgroup['id'])
+ instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+ src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
+
+ network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+
+ from nova.compute import utils as compute_utils # noqa
+ self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
+ lambda instance: network_model)
+
+ self.fw.prepare_instance_filter(instance_ref, network_model)
+ self.fw.apply_instance_filter(instance_ref, network_model)
+
+ self._validate_security_group()
+ # Extra test for TCP acceptance rules
+ for ip in network_model.fixed_ips():
+ if ip['version'] != 4:
+ continue
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
+ ' --dport 80:81 -s %s' % ip['address'])
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ db.instance_destroy(admin_ctxt, instance_ref['uuid'])
+
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 1)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 0)
+
+ def test_multinic_iptables(self):
+ ipv4_rules_per_addr = 1
+ ipv4_addr_per_network = 2
+ ipv6_rules_per_addr = 1
+ ipv6_addr_per_network = 1
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ _get_instance_nw_info = fake_network.fake_get_instance_nw_info
+ network_info = _get_instance_nw_info(self.stubs,
+ networks_count,
+ ipv4_addr_per_network)
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ # Extra rules are for the DHCP request
+ rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+ networks_count) + 2
+ self.assertEqual(ipv4_network_rules, rules)
+ self.assertEqual(ipv6_network_rules,
+ ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
+ def test_do_refresh_security_group_rules(self):
+ admin_ctxt = context.get_admin_context()
+ instance_ref = self._create_instance_ref()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+ secgroup = self._create_test_security_group()
+ db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+ secgroup['id'])
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.instance_info[instance_ref['id']] = (instance_ref,
+ network_info)
+ self._validate_security_group()
+ # add a rule to the security group
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'udp',
+ 'from_port': 200,
+ 'to_port': 299,
+ 'cidr': '192.168.99.0/24'})
+ # validate the extra rule
+ self.fw.refresh_security_group_rules(secgroup)
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
+ ' -s 192.168.99.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "Rules were not updated properly."
+ "The rule for UDP acceptance is missing")
+
+ def test_provider_firewall_rules(self):
+ # setup basic instance data
+ instance_ref = self._create_instance_ref()
+ # FRAGILE: as in libvirt tests
+ # peeks at how the firewall names chains
+ chain_name = 'inst-%s' % instance_ref['id']
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(0, len(rules))
+
+ admin_ctxt = context.get_admin_context()
+ # add a rule and send the update message, check for 1 rule
+ db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'tcp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+ # Add another, refresh, and make sure number of rules goes to two
+ provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'udp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(2, len(rules))
+
+ # create the instance filter and make sure it has a jump rule
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
+ inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == chain_name]
+ jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
+ provjump_rules = []
+ # IptablesTable doesn't make rules unique internally
+ for rule in jump_rules:
+ if 'provider' in rule.rule and rule not in provjump_rules:
+ provjump_rules.append(rule)
+ self.assertEqual(1, len(provjump_rules))
+
+ # remove a rule from the db, cast to compute to refresh rule
+ db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+
+class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for testing we find the right SR."""
+ def test_safe_find_sr_raise_exception(self):
+ # Ensure StorageRepositoryNotFound is raised when the filter is wrong.
+ self.flags(sr_matching_filter='yadayadayada', group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ self.assertRaises(exception.StorageRepositoryNotFound,
+ vm_utils.safe_find_sr, session)
+
+ def test_safe_find_sr_local_storage(self):
+ # Ensure the default local-storage is found.
+ self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ # This test is only guaranteed if there is one host in the pool
+ self.assertEqual(len(xenapi_fake.get_all('host')), 1)
+ host_ref = xenapi_fake.get_all('host')[0]
+ pbd_refs = xenapi_fake.get_all('PBD')
+ for pbd_ref in pbd_refs:
+ pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
+ if pbd_rec['host'] != host_ref:
+ continue
+ sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
+ if sr_rec['other_config']['i18n-key'] == 'local-storage':
+ local_sr = pbd_rec['SR']
+ expected = vm_utils.safe_find_sr(session)
+ self.assertEqual(local_sr, expected)
+
+ def test_safe_find_sr_by_other_criteria(self):
+ # Ensure the SR is found when using a different filter.
+ self.flags(sr_matching_filter='other-config:my_fake_sr=true',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ host_ref = xenapi_fake.get_all('host')[0]
+ local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
+ type='lvm',
+ other_config={'my_fake_sr': 'true'},
+ host_ref=host_ref)
+ expected = vm_utils.safe_find_sr(session)
+ self.assertEqual(local_sr, expected)
+
+ def test_safe_find_sr_default(self):
+ # Ensure the default SR is found regardless of other-config.
+ self.flags(sr_matching_filter='default-sr:true',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ pool_ref = session.call_xenapi('pool.get_all')[0]
+ expected = vm_utils.safe_find_sr(session)
+ self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
+ expected)
+
+
+def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
+ 'fake_host2'],
+ 'avail_zone2': ['fake_host3'], }):
+ for avail_zone, hosts in values.iteritems():
+ for service_host in hosts:
+ db.service_create(context,
+ {'host': service_host,
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0})
+ return values
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIAggregateTestCase(stubs.XenAPITestBase):
+ """Unit tests for aggregate operations."""
+ def setUp(self):
+ super(XenAPIAggregateTestCase, self).setUp()
+ self.flags(connection_url='http://test_url',
+ connection_username='test_user',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver',
+ host='host',
+ compute_driver='xenapi.XenAPIDriver',
+ default_availability_zone='avail_zone1')
+ self.flags(use_local=True, group='conductor')
+ host_ref = xenapi_fake.get_all('host')[0]
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.context = context.get_admin_context()
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.api = compute_api.AggregateAPI()
+ values = {'name': 'test_aggr',
+ 'metadata': {'availability_zone': 'test_zone',
+ pool_states.POOL_FLAG: 'XenAPI'}}
+ self.aggr = db.aggregate_create(self.context, values)
+ self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
+ 'master_compute': 'host',
+ 'availability_zone': 'fake_zone',
+ pool_states.KEY: pool_states.ACTIVE,
+ 'host': xenapi_fake.get_record('host',
+ host_ref)['uuid']}
+
+ def test_pool_add_to_aggregate_called_by_driver(self):
+
+ calls = []
+
+ def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
+ self.assertEqual("CONTEXT", context)
+ self.assertEqual("AGGREGATE", aggregate)
+ self.assertEqual("HOST", host)
+ self.assertEqual("SLAVEINFO", slave_info)
+ calls.append(pool_add_to_aggregate)
+ self.stubs.Set(self.conn._pool,
+ "add_to_aggregate",
+ pool_add_to_aggregate)
+
+ self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
+ slave_info="SLAVEINFO")
+
+ self.assertIn(pool_add_to_aggregate, calls)
+
+ def test_pool_remove_from_aggregate_called_by_driver(self):
+
+ calls = []
+
+ def pool_remove_from_aggregate(context, aggregate, host,
+ slave_info=None):
+ self.assertEqual("CONTEXT", context)
+ self.assertEqual("AGGREGATE", aggregate)
+ self.assertEqual("HOST", host)
+ self.assertEqual("SLAVEINFO", slave_info)
+ calls.append(pool_remove_from_aggregate)
+ self.stubs.Set(self.conn._pool,
+ "remove_from_aggregate",
+ pool_remove_from_aggregate)
+
+ self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
+ slave_info="SLAVEINFO")
+
+ self.assertIn(pool_remove_from_aggregate, calls)
+
+ def test_add_to_aggregate_for_first_host_sets_metadata(self):
+ def fake_init_pool(id, name):
+ fake_init_pool.called = True
+ self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
+
+ aggregate = self._aggregate_setup()
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
+ result = db.aggregate_get(self.context, aggregate['id'])
+ self.assertTrue(fake_init_pool.called)
+ self.assertThat(self.fake_metadata,
+ matchers.DictMatches(result['metadetails']))
+
+ def test_join_slave(self):
+ # Ensure _join_slave gets called when the request reaches the master.
+ def fake_join_slave(id, compute_uuid, host, url, user, password):
+ fake_join_slave.called = True
+ self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
+
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata)
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
+ dict(compute_uuid='fake_uuid',
+ url='fake_url',
+ user='fake_user',
+ passwd='fake_pass',
+ xenhost_uuid='fake_uuid'))
+ self.assertTrue(fake_join_slave.called)
+
+ def test_add_to_aggregate_first_host(self):
+ def fake_pool_set_name_label(self, session, pool_ref, name):
+ fake_pool_set_name_label.called = True
+ self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
+ fake_pool_set_name_label)
+ self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
+
+ metadata = {'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.CREATED}
+
+ aggregate = objects.Aggregate()
+ aggregate.name = 'fake_aggregate'
+ aggregate.metadata = dict(metadata)
+ aggregate.create(self.context)
+ aggregate.add_host('host')
+ self.assertEqual(["host"], aggregate.hosts)
+ self.assertEqual(metadata, aggregate.metadata)
+
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
+ self.assertTrue(fake_pool_set_name_label.called)
+
+ def test_remove_from_aggregate_called(self):
+ def fake_remove_from_aggregate(context, aggregate, host):
+ fake_remove_from_aggregate.called = True
+ self.stubs.Set(self.conn._pool,
+ "remove_from_aggregate",
+ fake_remove_from_aggregate)
+
+ self.conn.remove_from_aggregate(None, None, None)
+ self.assertTrue(fake_remove_from_aggregate.called)
+
+ def test_remove_from_empty_aggregate(self):
+ result = self._aggregate_setup()
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn._pool.remove_from_aggregate,
+ self.context, result, "test_host")
+
+ def test_remove_slave(self):
+ # Ensure _eject_slave gets called.
+ def fake_eject_slave(id, compute_uuid, host_uuid):
+ fake_eject_slave.called = True
+ self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
+
+ self.fake_metadata['host2'] = 'fake_host2_uuid'
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
+ self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
+ self.assertTrue(fake_eject_slave.called)
+
+ def test_remove_master_solo(self):
+ # Ensure metadata are cleared after removal.
+ def fake_clear_pool(id):
+ fake_clear_pool.called = True
+ self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
+
+ aggregate = self._aggregate_setup(metadata=self.fake_metadata)
+ self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
+ result = db.aggregate_get(self.context, aggregate['id'])
+ self.assertTrue(fake_clear_pool.called)
+ self.assertThat({'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: pool_states.ACTIVE},
+ matchers.DictMatches(result['metadetails']))
+
+ def test_remove_master_non_empty_pool(self):
+ # Ensure InvalidAggregateAction is raised when removing the master.
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata)
+
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn._pool.remove_from_aggregate,
+ self.context, aggregate, "host")
+
+ def _aggregate_setup(self, aggr_name='fake_aggregate',
+ aggr_zone='fake_zone',
+ aggr_state=pool_states.CREATED,
+ hosts=['host'], metadata=None):
+ aggregate = objects.Aggregate()
+ aggregate.name = aggr_name
+ aggregate.metadata = {'availability_zone': aggr_zone,
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: aggr_state,
+ }
+ if metadata:
+ aggregate.metadata.update(metadata)
+ aggregate.create(self.context)
+ for aggregate_host in hosts:
+ aggregate.add_host(aggregate_host)
+ return aggregate
+
+ def test_add_host_to_aggregate_invalid_changing_status(self):
+ """Ensure InvalidAggregateAction is raised when adding host while
+ aggregate is not ready.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'host')
+ self.assertIn('setup in progress', str(ex))
+
+ def test_add_host_to_aggregate_invalid_dismissed_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ deleted.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate deleted', str(ex))
+
+ def test_add_host_to_aggregate_invalid_error_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ in error.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate in error', str(ex))
+
+ def test_remove_host_from_aggregate_error(self):
+ # Ensure we can remove a host from an aggregate even if in error.
+ values = _create_service_entries(self.context)
+ fake_zone = values.keys()[0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ # let's mock the fact that the aggregate is ready!
+ metadata = {pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(self.context, aggr['id'], metadata)
+ for aggregate_host in values[fake_zone]:
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], aggregate_host)
+ # removing a host should succeed and leave the aggregate active
+ expected = self.api.remove_host_from_aggregate(self.context,
+ aggr['id'],
+ values[fake_zone][0])
+ self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
+ self.assertEqual(expected['metadata'][pool_states.KEY],
+ pool_states.ACTIVE)
+
+ def test_remove_host_from_aggregate_invalid_dismissed_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ deleted.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.remove_from_aggregate, self.context,
+ aggregate, 'fake_host')
+
+ def test_remove_host_from_aggregate_invalid_changing_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ changing.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.remove_from_aggregate, self.context,
+ aggregate, 'fake_host')
+
+ def test_add_aggregate_host_raise_err(self):
+ # Ensure the undo operation works correctly on add.
+ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
+ raise exception.AggregateError(
+ aggregate_id='', action='', reason='')
+ self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ fake_driver_add_to_aggregate)
+ metadata = {pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
+ db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
+
+ self.assertRaises(exception.AggregateError,
+ self.compute.add_aggregate_host,
+ self.context, host="fake_host",
+ aggregate=jsonutils.to_primitive(self.aggr),
+ slave_info=None)
+ expected = db.aggregate_get(self.context, self.aggr['id'])
+ self.assertEqual(expected['metadetails'][pool_states.KEY],
+ pool_states.ERROR)
+ self.assertEqual(expected['hosts'], [])
+
+
+class MockComputeAPI(object):
+ def __init__(self):
+ self._mock_calls = []
+
+ def add_aggregate_host(self, ctxt, aggregate,
+ host_param, host, slave_info):
+ self._mock_calls.append((
+ self.add_aggregate_host, ctxt, aggregate,
+ host_param, host, slave_info))
+
+ def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
+ host, slave_info):
+ self._mock_calls.append((
+ self.remove_aggregate_host, ctxt, aggregate_id,
+ host_param, host, slave_info))
+
+
+class StubDependencies(object):
+ """Stub dependencies for ResourcePool."""
+
+ def __init__(self):
+ self.compute_rpcapi = MockComputeAPI()
+
+ def _is_hv_pool(self, *_ignore):
+ return True
+
+ def _get_metadata(self, *_ignore):
+ return {
+ pool_states.KEY: {},
+ 'master_compute': 'master'
+ }
+
+ def _create_slave_info(self, *ignore):
+ return "SLAVE_INFO"
+
+
+class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
+ """A ResourcePool, use stub dependencies."""
+
+
+class HypervisorPoolTestCase(test.NoDBTestCase):
+
+ fake_aggregate = {
+ 'id': 98,
+ 'hosts': [],
+ 'metadata': {
+ 'master_compute': 'master',
+ pool_states.POOL_FLAG: {},
+ pool_states.KEY: {}
+ }
+ }
+
+ def test_slave_asks_master_to_add_slave_to_pool(self):
+ slave = ResourcePoolWithStubs()
+
+ slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
+
+ self.assertIn(
+ (slave.compute_rpcapi.add_aggregate_host,
+ "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
+ "slave", "master", "SLAVE_INFO"),
+ slave.compute_rpcapi._mock_calls)
+
+ def test_slave_asks_master_to_remove_slave_from_pool(self):
+ slave = ResourcePoolWithStubs()
+
+ slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
+
+ self.assertIn(
+ (slave.compute_rpcapi.remove_aggregate_host,
+ "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
+ slave.compute_rpcapi._mock_calls)
+
+
+class SwapXapiHostTestCase(test.NoDBTestCase):
+
+ def test_swapping(self):
+ self.assertEqual(
+ "http://otherserver:8765/somepath",
+ pool.swap_xapi_host(
+ "http://someserver:8765/somepath", 'otherserver'))
+
+ def test_no_port(self):
+ self.assertEqual(
+ "http://otherserver/somepath",
+ pool.swap_xapi_host(
+ "http://someserver/somepath", 'otherserver'))
+
+ def test_no_path(self):
+ self.assertEqual(
+ "http://otherserver",
+ pool.swap_xapi_host(
+ "http://someserver", 'otherserver'))
+
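+# One implementation consistent with the three expectations above, assuming
+# only the hostname portion of the URL should change (a sketch, not
+# necessarily the pool.py source):
+#
+#     import urlparse
+#
+#     def swap_xapi_host(url, host_addr):
+#         temp_url = urlparse.urlparse(url)
+#         return url.replace(temp_url.hostname, '%s' % host_addr)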
+
+class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for live_migration."""
+ def setUp(self):
+ super(XenAPILiveMigrateTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver',
+ host='host')
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ self.context = context.get_admin_context()
+
+ def test_live_migration_calls_vmops(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_live_migrate(context, instance_ref, dest, post_method,
+ recover_method, block_migration, migrate_data):
+ fake_live_migrate.called = True
+
+ self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
+
+ self.conn.live_migration(None, None, None, None, None)
+ self.assertTrue(fake_live_migrate.called)
+
+ def test_pre_live_migration(self):
+ # ensure method is present
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.conn.pre_live_migration(None, None, None, None, None)
+
+ def test_post_live_migration_at_destination(self):
+ # ensure method is present
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ fake_instance = {"name": "name"}
+ fake_network_info = "network_info"
+
+ def fake_fw(instance, network_info):
+ self.assertEqual(instance, fake_instance)
+ self.assertEqual(network_info, fake_network_info)
+ fake_fw.call_count += 1
+
+ def fake_create_kernel_and_ramdisk(context, session, instance,
+ name_label):
+ return "fake-kernel-file", "fake-ramdisk-file"
+
+ fake_fw.call_count = 0
+ _vmops = self.conn._vmops
+ self.stubs.Set(_vmops.firewall_driver,
+ 'setup_basic_filtering', fake_fw)
+ self.stubs.Set(_vmops.firewall_driver,
+ 'prepare_instance_filter', fake_fw)
+ self.stubs.Set(_vmops.firewall_driver,
+ 'apply_instance_filter', fake_fw)
+ self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
+ fake_create_kernel_and_ramdisk)
+
+ def fake_get_vm_opaque_ref(instance):
+ fake_get_vm_opaque_ref.called = True
+ self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
+ fake_get_vm_opaque_ref.called = False
+
+ def fake_strip_base_mirror_from_vdis(session, vm_ref):
+ fake_strip_base_mirror_from_vdis.called = True
+ self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
+ fake_strip_base_mirror_from_vdis)
+ fake_strip_base_mirror_from_vdis.called = False
+
+ self.conn.post_live_migration_at_destination(None, fake_instance,
+ fake_network_info, None)
+ self.assertEqual(fake_fw.call_count, 3)
+ self.assertTrue(fake_get_vm_opaque_ref.called)
+ self.assertTrue(fake_strip_base_mirror_from_vdis.called)
+
+ def test_check_can_live_migrate_destination_with_block_migration(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
+
+ expected = {'block_migration': True,
+ 'migrate_data': {
+ 'migrate_send_data': "fake_migrate_data",
+ 'destination_sr_ref': 'asdf'
+ }
+ }
+ result = self.conn.check_can_live_migrate_destination(self.context,
+ {'host': 'host'},
+ {}, {},
+ True, False)
+ self.assertEqual(expected, result)
+
+ def test_check_live_migrate_destination_verifies_ip(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ for pif_ref in xenapi_fake.get_all('PIF'):
+ pif_rec = xenapi_fake.get_record('PIF', pif_ref)
+ pif_rec['IP'] = ''
+ pif_rec['IPv6'] = ''
+
+ self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
+
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_destination,
+ self.context, {'host': 'host'},
+ {}, {},
+ True, False)
+
+ def test_check_can_live_migrate_destination_block_migration_fails(self):
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForFailedMigrateTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_destination,
+ self.context, {'host': 'host'},
+ {}, {},
+ True, False)
+
+ def _add_default_live_migrate_stubs(self, conn):
+ def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
+ pass
+
+ def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
+ return []
+
+ def fake_get_vm_opaque_ref(instance):
+ return "fake_vm"
+
+ def fake_lookup_kernel_ramdisk(session, vm):
+ return ("fake_PV_kernel", "fake_PV_ramdisk")
+
+ self.stubs.Set(conn._vmops, "_generate_vdi_map",
+ fake_generate_vdi_map)
+ self.stubs.Set(conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+ self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
+ fake_get_vm_opaque_ref)
+ self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
+ fake_lookup_kernel_ramdisk)
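+        # The four stubs above neutralise the vmops internals that would
+        # otherwise hit XenAPI: VDI-map generation, iSCSI SR discovery,
+        # VM opaque-ref lookup, and kernel/ramdisk lookup.  Individual
+        # tests re-stub these when they need different behaviour.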
+
+ def test_check_can_live_migrate_source_with_block_migrate(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ dest_check_data = {'block_migration': True,
+ 'migrate_data': {
+ 'destination_sr_ref': None,
+ 'migrate_send_data': None
+ }}
+ result = self.conn.check_can_live_migrate_source(self.context,
+ {'host': 'host'},
+ dest_check_data)
+ self.assertEqual(dest_check_data, result)
+
+ def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
+ return ['sr_ref']
+ self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+
+ def fake_make_plugin_call(plugin, method, **args):
+ return "true"
+ self.stubs.Set(self.conn._vmops, "_make_plugin_call",
+ fake_make_plugin_call)
+
+ dest_check_data = {'block_migration': True,
+ 'migrate_data': {
+ 'destination_sr_ref': None,
+ 'migrate_send_data': None
+ }}
+ result = self.conn.check_can_live_migrate_source(self.context,
+ {'host': 'host'},
+ dest_check_data)
+ self.assertEqual(dest_check_data, result)
+
+ def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
+ return ['sr_ref']
+ self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+
+ def fake_make_plugin_call(plugin, method, **args):
+ return {'returncode': 'error', 'message': 'Plugin not found'}
+ self.stubs.Set(self.conn._vmops, "_make_plugin_call",
+ fake_make_plugin_call)
+
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_source,
+ self.context, {'host': 'host'},
+ {})
+
+ def test_check_can_live_migrate_source_with_block_migrate_fails(self):
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForFailedMigrateTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ dest_check_data = {'block_migration': True,
+ 'migrate_data': {
+ 'destination_sr_ref': None,
+ 'migrate_send_data': None
+ }}
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_source,
+ self.context,
+ {'host': 'host'},
+ dest_check_data)
+
+ def test_check_can_live_migrate_works(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_aggregate_get_by_host(context, host, key=None):
+ self.assertEqual(CONF.host, host)
+ return [dict(test_aggregate.fake_aggregate,
+ metadetails={"host": "test_host_uuid"})]
+
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+ self.conn.check_can_live_migrate_destination(self.context,
+ {'host': 'host'}, False, False)
+
+ def test_check_can_live_migrate_fails(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_aggregate_get_by_host(context, host, key=None):
+ self.assertEqual(CONF.host, host)
+ return [dict(test_aggregate.fake_aggregate,
+ metadetails={"dest_other": "test_host_uuid"})]
+
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_destination,
+ self.context, {'host': 'host'}, None, None)
+
+ def test_live_migration(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_vm_opaque_ref(instance):
+ return "fake_vm"
+ self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
+ fake_get_vm_opaque_ref)
+
+ def fake_get_host_opaque_ref(context, destination_hostname):
+ return "fake_host"
+ self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
+ fake_get_host_opaque_ref)
+
+ def post_method(context, instance, destination_hostname,
+ block_migration, migrate_data):
+ post_method.called = True
+
+ self.conn.live_migration(self.conn, None, None, post_method, None)
+
+ self.assertTrue(post_method.called, "post_method.called")
+
+ def test_live_migration_on_failure(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_vm_opaque_ref(instance):
+ return "fake_vm"
+ self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
+ fake_get_vm_opaque_ref)
+
+ def fake_get_host_opaque_ref(context, destination_hostname):
+ return "fake_host"
+ self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
+ fake_get_host_opaque_ref)
+
+ def fake_call_xenapi(*args):
+ raise NotImplementedError()
+ self.stubs.Set(self.conn._vmops._session, "call_xenapi",
+ fake_call_xenapi)
+
+ def recover_method(context, instance, destination_hostname,
+ block_migration):
+ recover_method.called = True
+
+ self.assertRaises(NotImplementedError, self.conn.live_migration,
+ self.conn, None, None, None, recover_method)
+ self.assertTrue(recover_method.called, "recover_method.called")
+
+ def test_live_migration_calls_post_migration(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def post_method(context, instance, destination_hostname,
+ block_migration, migrate_data):
+ post_method.called = True
+
+ # pass block_migration = True and migrate data
+ migrate_data = {"destination_sr_ref": "foo",
+ "migrate_send_data": "bar"}
+ self.conn.live_migration(self.conn, None, None, post_method, None,
+ True, migrate_data)
+ self.assertTrue(post_method.called, "post_method.called")
+
+ def test_live_migration_block_cleans_srs(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def fake_get_iscsi_srs(context, instance):
+ return ['sr_ref']
+ self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+
+ def fake_forget_sr(context, instance):
+ fake_forget_sr.called = True
+ self.stubs.Set(volume_utils, "forget_sr",
+ fake_forget_sr)
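+        # With iSCSI SRs attached, a successful block migration is
+        # expected to forget them on the source host; fake_forget_sr
+        # records that call.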
+
+ def post_method(context, instance, destination_hostname,
+ block_migration, migrate_data):
+ post_method.called = True
+
+ migrate_data = {"destination_sr_ref": "foo",
+ "migrate_send_data": "bar"}
+ self.conn.live_migration(self.conn, None, None, post_method, None,
+ True, migrate_data)
+
+ self.assertTrue(post_method.called, "post_method.called")
+ self.assertTrue(fake_forget_sr.called, "forget_sr.called")
+
+ def test_live_migration_with_block_migration_raises_invalid_param(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def recover_method(context, instance, destination_hostname,
+ block_migration):
+ recover_method.called = True
+ # pass block_migration = True and no migrate data
+ self.assertRaises(exception.InvalidParameterValue,
+ self.conn.live_migration, self.conn,
+ None, None, None, recover_method, True, None)
+ self.assertTrue(recover_method.called, "recover_method.called")
+
+ def test_live_migration_with_block_migration_fails_migrate_send(self):
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForFailedMigrateTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def recover_method(context, instance, destination_hostname,
+ block_migration):
+ recover_method.called = True
+ # pass block_migration = True and migrate data
+ migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
+ self.assertRaises(exception.MigrationError,
+ self.conn.live_migration, self.conn,
+ None, None, None, recover_method, True, migrate_data)
+ self.assertTrue(recover_method.called, "recover_method.called")
+
+ def test_live_migrate_block_migration_xapi_call_parameters(self):
+
+ fake_vdi_map = object()
+
+ class Session(xenapi_fake.SessionBase):
+ def VM_migrate_send(self_, session, vmref, migrate_data, islive,
+ vdi_map, vif_map, options):
+ self.assertEqual('SOMEDATA', migrate_data)
+ self.assertEqual(fake_vdi_map, vdi_map)
+
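+        # NOTE: the fake Session method takes ``self_`` so that the
+        # enclosing test case's ``self`` stays visible for the
+        # assertions above.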
+ stubs.stubout_session(self.stubs, Session)
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(conn)
+
+ def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
+ return fake_vdi_map
+
+ self.stubs.Set(conn._vmops, "_generate_vdi_map",
+ fake_generate_vdi_map)
+
+ def dummy_callback(*args, **kwargs):
+ pass
+
+ conn.live_migration(
+ self.context, instance=dict(name='ignore'), dest=None,
+ post_method=dummy_callback, recover_method=dummy_callback,
+ block_migration="SOMEDATA",
+ migrate_data=dict(migrate_send_data='SOMEDATA',
+ destination_sr_ref="TARGET_SR_OPAQUE_REF"))
+
+ def test_live_migrate_pool_migration_xapi_call_parameters(self):
+
+ class Session(xenapi_fake.SessionBase):
+ def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
+ self.assertEqual("fake_ref", host_ref)
+ self.assertEqual({"live": "true"}, options)
+ raise IOError()
+
+ stubs.stubout_session(self.stubs, Session)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self._add_default_live_migrate_stubs(conn)
+
+ def fake_get_host_opaque_ref(context, destination):
+ return "fake_ref"
+
+ self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
+ fake_get_host_opaque_ref)
+
+ def dummy_callback(*args, **kwargs):
+ pass
+
+ self.assertRaises(IOError, conn.live_migration,
+ self.context, instance=dict(name='ignore'), dest=None,
+ post_method=dummy_callback, recover_method=dummy_callback,
+ block_migration=False, migrate_data={})
+
+ def test_generate_vdi_map(self):
+ stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ vm_ref = "fake_vm_ref"
+
+ def fake_find_sr(_session):
+ self.assertEqual(conn._session, _session)
+ return "source_sr_ref"
+ self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
+
+ def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
+ self.assertEqual(conn._session, _session)
+ self.assertEqual(vm_ref, _vm_ref)
+ self.assertEqual("source_sr_ref", _sr_ref)
+ return ["vdi0", "vdi1"]
+
+ self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
+ fake_get_instance_vdis_for_sr)
+
+ result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
+
+ self.assertEqual({"vdi0": "dest_sr_ref",
+ "vdi1": "dest_sr_ref"}, result)
+
+ def test_rollback_live_migration_at_destination(self):
+ stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
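+        # The rollback path exercised here (empty network_info, no block
+        # device info) must leave destroy() untouched.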
+ with mock.patch.object(conn, "destroy") as mock_destroy:
+ conn.rollback_live_migration_at_destination("context",
+ "instance", [], None)
+ self.assertFalse(mock_destroy.called)
+
+
+class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenAPIInjectMetadataTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.xenstore = dict(persist={}, ephem={})
+
+ self.called_fake_get_vm_opaque_ref = False
+
+ def fake_get_vm_opaque_ref(inst, instance):
+ self.called_fake_get_vm_opaque_ref = True
+ if instance["uuid"] == "not_found":
+ raise exception.NotFound
+ self.assertEqual(instance, {'uuid': 'fake'})
+ return 'vm_ref'
+
+ def fake_add_to_param_xenstore(inst, vm_ref, key, val):
+ self.assertEqual(vm_ref, 'vm_ref')
+ self.xenstore['persist'][key] = val
+
+ def fake_remove_from_param_xenstore(inst, vm_ref, key):
+ self.assertEqual(vm_ref, 'vm_ref')
+ if key in self.xenstore['persist']:
+ del self.xenstore['persist'][key]
+
+ def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
+ self.assertEqual(instance, {'uuid': 'fake'})
+ self.assertEqual(vm_ref, 'vm_ref')
+ self.xenstore['ephem'][path] = jsonutils.dumps(value)
+
+ def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
+ self.assertEqual(instance, {'uuid': 'fake'})
+ self.assertEqual(vm_ref, 'vm_ref')
+ if path in self.xenstore['ephem']:
+ del self.xenstore['ephem'][path]
+
+ self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
+ fake_get_vm_opaque_ref)
+ self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
+ fake_add_to_param_xenstore)
+ self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
+ fake_remove_from_param_xenstore)
+ self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
+ fake_write_to_xenstore)
+ self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
+ fake_delete_from_xenstore)
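+        # self.xenstore models the two stores the driver writes to:
+        # 'persist' stands in for the VM's param-xenstore (reapplied at
+        # boot) and 'ephem' for the live xenstore of a running domain.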
+
+ def test_inject_instance_metadata(self):
+
+ # Add some system_metadata to ensure it doesn't get added
+ # to xenstore
+ instance = dict(metadata=[{'key': 'a', 'value': 1},
+ {'key': 'b', 'value': 2},
+ {'key': 'c', 'value': 3},
+ # Check xenstore key sanitizing
+ {'key': 'hi.there', 'value': 4},
+ {'key': 'hi!t.e/e', 'value': 5}],
+ system_metadata=[{'key': 'sys_a', 'value': 1},
+ {'key': 'sys_b', 'value': 2},
+ {'key': 'sys_c', 'value': 3}],
+ uuid='fake')
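+        # Key sanitising (inferred from the expected values below):
+        # characters that are not xenstore-safe, such as '.', '!' and
+        # '/', are replaced with '_'.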
+ self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ 'vm-data/user-metadata/hi_there': '4',
+ 'vm-data/user-metadata/hi_t_e_e': '5',
+ },
+ 'ephem': {},
+ })
+
+ def test_change_instance_metadata_add(self):
+ # Test XenStore key sanitizing here, too.
+ diff = {'test.key': ['+', 4]}
+ instance = {'uuid': 'fake'}
+ self.xenstore = {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ }
+
+ self.conn._vmops.change_instance_metadata(instance, diff)
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ 'vm-data/user-metadata/test_key': '4',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ 'vm-data/user-metadata/test_key': '4',
+ },
+ })
+
+ def test_change_instance_metadata_update(self):
+ diff = dict(b=['+', 4])
+ instance = {'uuid': 'fake'}
+ self.xenstore = {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ }
+
+ self.conn._vmops.change_instance_metadata(instance, diff)
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '4',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '4',
+ 'vm-data/user-metadata/c': '3',
+ },
+ })
+
+ def test_change_instance_metadata_delete(self):
+ diff = dict(b=['-'])
+ instance = {'uuid': 'fake'}
+ self.xenstore = {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ }
+
+ self.conn._vmops.change_instance_metadata(instance, diff)
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/c': '3',
+ },
+ })
+
+ def test_change_instance_metadata_not_found(self):
+ instance = {'uuid': 'not_found'}
+ self.conn._vmops.change_instance_metadata(instance, "fake_diff")
+ self.assertTrue(self.called_fake_get_vm_opaque_ref)
+
+
+class XenAPISessionTestCase(test.NoDBTestCase):
+ def _get_mock_xapisession(self, software_version):
+ class MockXapiSession(xenapi_session.XenAPISession):
+ def __init__(_ignore):
+ "Skip the superclass's dirty init"
+
+ def _get_software_version(_ignore):
+ return software_version
+
+ return MockXapiSession()
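+    # The mock skips XenAPISession.__init__ entirely so the version
+    # parsing and plugin-check helpers can be exercised in isolation.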
+
+ def test_local_session(self):
+ session = self._get_mock_xapisession({})
+ session.is_local_connection = True
+ session.XenAPI = self.mox.CreateMockAnything()
+ session.XenAPI.xapi_local().AndReturn("local_connection")
+
+ self.mox.ReplayAll()
+ self.assertEqual("local_connection",
+ session._create_session("unix://local"))
+
+ def test_remote_session(self):
+ session = self._get_mock_xapisession({})
+ session.is_local_connection = False
+ session.XenAPI = self.mox.CreateMockAnything()
+ session.XenAPI.Session("url").AndReturn("remote_connection")
+
+ self.mox.ReplayAll()
+ self.assertEqual("remote_connection", session._create_session("url"))
+
+ def test_get_product_version_product_brand_does_not_fail(self):
+ session = self._get_mock_xapisession({
+ 'build_number': '0',
+ 'date': '2012-08-03',
+ 'hostname': 'komainu',
+ 'linux': '3.2.0-27-generic',
+ 'network_backend': 'bridge',
+ 'platform_name': 'XCP_Kronos',
+ 'platform_version': '1.6.0',
+ 'xapi': '1.3',
+ 'xen': '4.1.2',
+ 'xencenter_max': '1.10',
+ 'xencenter_min': '1.10'
+ })
+
+ self.assertEqual(
+ ((1, 6, 0), None),
+ session._get_product_version_and_brand()
+ )
+
+ def test_get_product_version_product_brand_xs_6(self):
+ session = self._get_mock_xapisession({
+ 'product_brand': 'XenServer',
+ 'product_version': '6.0.50',
+ 'platform_version': '0.0.1'
+ })
+
+ self.assertEqual(
+ ((6, 0, 50), 'XenServer'),
+ session._get_product_version_and_brand()
+ )
+
+ def test_verify_plugin_version_same(self):
+ session = self._get_mock_xapisession({})
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("2.4")
+
+ self.mox.ReplayAll()
+ session._verify_plugin_version()
+
+ def test_verify_plugin_version_compatible(self):
+ session = self._get_mock_xapisession({})
+ session.XenAPI = xenapi_fake.FakeXenAPI()
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("2.5")
+
+ self.mox.ReplayAll()
+ session._verify_plugin_version()
+
+ def test_verify_plugin_version_bad_maj(self):
+ session = self._get_mock_xapisession({})
+ session.XenAPI = xenapi_fake.FakeXenAPI()
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("3.0")
+
+ self.mox.ReplayAll()
+ self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
+
+ def test_verify_plugin_version_bad_min(self):
+ session = self._get_mock_xapisession({})
+ session.XenAPI = xenapi_fake.FakeXenAPI()
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("2.3")
+
+ self.mox.ReplayAll()
+ self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
+
+ def test_verify_current_version_matches(self):
+ session = self._get_mock_xapisession({})
+
+        # Read the plugin source file to extract its version (the
+        # plugin itself is not importable from the tests)
+ path = os.path.dirname(__file__)
+ rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
+ "plugins/nova_plugin_version"
+ for elem in rel_path_elem.split('/'):
+ path = os.path.join(path, elem)
+ path = os.path.realpath(path)
+
+ plugin_version = None
+ with open(path) as plugin_file:
+ for line in plugin_file:
+ if "PLUGIN_VERSION = " in line:
+ plugin_version = line.strip()[17:].strip('"')
+
+ self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
+ plugin_version)
+
+
+class XenAPIFakeTestCase(test.NoDBTestCase):
+ def test_query_matches(self):
+ record = {'a': '1', 'b': '2', 'c_d': '3'}
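+        # The fake's query matcher implements a small subset of the
+        # XenAPI expression language: field equality with either quote
+        # style, plus 'and', 'or' and 'not'; a double underscore in the
+        # query apparently matches a single underscore in the record key
+        # (see 'c__d' below).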
+
+ tests = {'field "a"="1"': True,
+ 'field "b"="2"': True,
+ 'field "b"="4"': False,
+ 'not field "b"="4"': True,
+ 'field "a"="1" and field "b"="4"': False,
+ 'field "a"="1" or field "b"="4"': True,
+ 'field "c__d"="3"': True,
+ 'field \'b\'=\'2\'': True,
+ }
+
+        for query, expected in tests.items():
+ fail_msg = "for test '%s'" % query
+ self.assertEqual(xenapi_fake._query_matches(record, query),
+ expected, fail_msg)
+
+ def test_query_bad_format(self):
+ record = {'a': '1', 'b': '2', 'c': '3'}
+
+ tests = ['"a"="1" or "b"="4"',
+ 'a=1',
+ ]
+
+ for query in tests:
+ fail_msg = "for test '%s'" % query
+ self.assertFalse(xenapi_fake._query_matches(record, query),
+ fail_msg)
diff --git a/nova/tests/unit/virt/xenapi/vm_rrd.xml b/nova/tests/unit/virt/xenapi/vm_rrd.xml
new file mode 100644
index 0000000000..f9a7c8083e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/vm_rrd.xml
@@ -0,0 +1,1101 @@
+<rrd>
+ <version>0003</version>
+ <step>5</step>
+ <lastupdate>1328795567</lastupdate>
+ <ds>
+ <name>cpu0</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>1.0000</max>
+ <last_ds>5102.8417</last_ds>
+ <value>0.0110</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>Infinity</max>
+ <last_ds>4294967296</last_ds>
+ <value>10961792000.0000</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory_target</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>Infinity</max>
+ <last_ds>4294967296</last_ds>
+ <value>10961792000.0000</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_0_tx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1079132206</last_ds>
+ <value>752.4007</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_0_rx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1093250983</last_ds>
+ <value>4837.8805</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvda_write</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>4552440832</last_ds>
+ <value>0.0</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvda_read</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1371223040</last_ds>
+ <value>0.0</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory_internal_free</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1415564</last_ds>
+ <value>3612860.6020</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvdb_write</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvdb_read</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_2_tx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_2_rx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>1</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0259</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.6642</v>
+ <v>1968.1381</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>258.6530</v>
+ <v>1890.5522</v>
+ <v>565.3453</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.1120</v>
+ <v>1778.2501</v>
+ <v>817.5985</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0039</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.5131</v>
+ <v>1806.3336</v>
+ <v>9811.4443</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0041</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>264.3683</v>
+ <v>1952.4054</v>
+ <v>4370.4121</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0034</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>251.6331</v>
+ <v>1958.8002</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>274.5222</v>
+ <v>2067.5947</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0046</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>260.9790</v>
+ <v>2042.7045</v>
+ <v>1671.6940</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0163</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0992</v>
+ <v>1845.3728</v>
+ <v>4119.4312</v>
+ <v>0.0</v>
+ <v>1431698.1250</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0098</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>273.9898</v>
+ <v>1879.1331</v>
+ <v>5459.4102</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>261.3513</v>
+ <v>2335.3000</v>
+ <v>6837.4907</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0793</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.2620</v>
+ <v>2092.4504</v>
+ <v>2391.9744</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0406</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.0746</v>
+ <v>1859.9802</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>263.4259</v>
+ <v>2010.8950</v>
+ <v>550.1484</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0565</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>29891.2227</v>
+ <v>26210.6699</v>
+ <v>3213.4324</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0645</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>31501.1562</v>
+ <v>29642.1641</v>
+ <v>400.9566</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0381</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>17350.7676</v>
+ <v>20748.6133</v>
+ <v>1247.4755</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0212</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>11981.0918</v>
+ <v>12866.9775</v>
+ <v>5774.9497</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0045</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0901</v>
+ <v>1898.6758</v>
+ <v>4446.3750</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0614</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0959</v>
+ <v>2255.1912</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0609</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>253.1091</v>
+ <v>2099.0601</v>
+ <v>1230.0925</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0047</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>268.6620</v>
+ <v>1759.5667</v>
+ <v>2861.2107</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0100</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>292.2647</v>
+ <v>1828.5435</v>
+ <v>3270.3474</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0093</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>303.5810</v>
+ <v>1932.1176</v>
+ <v>4485.4355</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0038</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>291.6633</v>
+ <v>1842.4425</v>
+ <v>2898.5137</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>287.4134</v>
+ <v>1816.0144</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>12</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0150</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3221225472.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3221225472.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1181.3309</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2358.2158</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2080.5770</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1061673.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0130</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>261.6000</v>
+ <v>1990.6442</v>
+ <v>1432.2385</v>
+ <v>0.0</v>
+ <v>1441908.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0172</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>318.8885</v>
+ <v>1979.7030</v>
+ <v>1724.9528</v>
+ <v>0.0</v>
+ <v>1441912.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0483</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>3108.1233</v>
+ <v>4815.9639</v>
+ <v>4962.0503</v>
+ <v>68.2667</v>
+ <v>1441916.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0229</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>1944.2039</v>
+ <v>3757.9177</v>
+ <v>10861.6670</v>
+ <v>0.0</v>
+ <v>1439546.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0639</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>44504.8789</v>
+ <v>34745.1523</v>
+ <v>9571.1455</v>
+ <v>0.0</v>
+ <v>1437892.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.2945</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>79219.1641</v>
+ <v>102827.0781</v>
+ <v>438999.3438</v>
+ <v>0.0</v>
+ <v>1415337.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.1219</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>61093.7109</v>
+ <v>49836.3164</v>
+ <v>8734.3730</v>
+ <v>0.0</v>
+ <v>1399324.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0151</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>48.3914</v>
+ <v>1922.5935</v>
+ <v>2251.4346</v>
+ <v>0.0</v>
+ <v>1421237.1250</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.3162</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>80667.4922</v>
+ <v>53950.0430</v>
+ <v>416858.5000</v>
+ <v>0.0</v>
+ <v>1437032.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>720</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0848</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3775992081.0667</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3775992081.0667</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>16179.3166</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>13379.7997</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>109091.4636</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>323.1289</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1259057.5294</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.1458</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>6454.3096</v>
+ <v>5327.6709</v>
+ <v>116520.9609</v>
+ <v>738.4178</v>
+ <v>2653538.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0971</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>10180.4941</v>
+ <v>10825.1777</v>
+ <v>98749.3438</v>
+ <v>523.3778</v>
+ <v>2381725.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0683</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>23183.2695</v>
+ <v>19607.6523</v>
+ <v>93946.5703</v>
+ <v>807.8222</v>
+ <v>2143269.2500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0352</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>7552.5708</v>
+ <v>7320.5391</v>
+ <v>30907.9453</v>
+ <v>150384.6406</v>
+ <v>1583336.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>17280</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0187</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2483773622.0445</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2483773622.0445</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2648.2715</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3002.4238</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>19129.3156</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>6365.7244</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1468863.7753</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0579</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>6291.0151</v>
+ <v>7489.2583</v>
+ <v>70915.3750</v>
+ <v>50.1570</v>
+ <v>613674.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0541</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>10406.3682</v>
+ <v>10638.9365</v>
+ <v>32972.1250</v>
+ <v>7.6800</v>
+ <v>647683.5625</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0189</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>207.0768</v>
+ <v>2145.3167</v>
+ <v>1685.8905</v>
+ <v>0.0</v>
+ <v>599934.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0202</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>71.0270</v>
+ <v>2046.6521</v>
+ <v>6703.9795</v>
+ <v>182.0444</v>
+ <v>595963.8750</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0661</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>8520.3213</v>
+ <v>8488.0664</v>
+ <v>52978.7930</v>
+ <v>7.3956</v>
+ <v>727540.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0219</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>40443.0117</v>
+ <v>20702.5996</v>
+ <v>-1377536.8750</v>
+ <v>36990.5898</v>
+ <v>1823778.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0265</v>
+ <v>4294971904.0000</v>
+ <v>4294754304.0000</v>
+ <v>6384.6367</v>
+ <v>6513.4951</v>
+ <v>22415.6348</v>
+ <v>2486.9690</v>
+ <v>3072170.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+</rrd>