author     Sean Dague <sean@dague.net>  2014-11-07 14:27:03 +0100
committer  Sean Dague <sean@dague.net>  2014-11-12 15:31:08 -0500
commit     89cd6a0c493e26b5a9e017c99d731464292abbaf (patch)
tree       c2bf790d1684cd539b820247113492495123a163 /nova/tests/unit/api/ec2
parent     5c8bbaafef590e4d346a03051a0ba55c8be26c5c (diff)
move all tests to nova/tests/unit
As part of the split of functional and unit tests we need to isolate
the unit tests into a separate directory for having multiple test
targets in a sane way.

Part of bp:functional-tests-for-nova

Change-Id: Id42ba373c1bda6a312b673ab2b489ca56da8c628
Diffstat (limited to 'nova/tests/unit/api/ec2')
-rw-r--r--  nova/tests/unit/api/ec2/__init__.py                       0
-rw-r--r--  nova/tests/unit/api/ec2/public_key/dummy.fingerprint      1
-rw-r--r--  nova/tests/unit/api/ec2/public_key/dummy.pub               1
-rw-r--r--  nova/tests/unit/api/ec2/test_api.py                      635
-rw-r--r--  nova/tests/unit/api/ec2/test_apirequest.py                92
-rw-r--r--  nova/tests/unit/api/ec2/test_cinder_cloud.py            1096
-rw-r--r--  nova/tests/unit/api/ec2/test_cloud.py                   3255
-rw-r--r--  nova/tests/unit/api/ec2/test_ec2_validate.py             277
-rw-r--r--  nova/tests/unit/api/ec2/test_ec2utils.py                  61
-rw-r--r--  nova/tests/unit/api/ec2/test_error_response.py           132
-rw-r--r--  nova/tests/unit/api/ec2/test_faults.py                    46
-rw-r--r--  nova/tests/unit/api/ec2/test_middleware.py               225
12 files changed, 5821 insertions, 0 deletions
diff --git a/nova/tests/unit/api/ec2/__init__.py b/nova/tests/unit/api/ec2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/ec2/__init__.py
diff --git a/nova/tests/unit/api/ec2/public_key/dummy.fingerprint b/nova/tests/unit/api/ec2/public_key/dummy.fingerprint
new file mode 100644
index 0000000000..715bca27a2
--- /dev/null
+++ b/nova/tests/unit/api/ec2/public_key/dummy.fingerprint
@@ -0,0 +1 @@
+1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df
diff --git a/nova/tests/unit/api/ec2/public_key/dummy.pub b/nova/tests/unit/api/ec2/public_key/dummy.pub
new file mode 100644
index 0000000000..d4cf2bc0d8
--- /dev/null
+++ b/nova/tests/unit/api/ec2/public_key/dummy.pub
@@ -0,0 +1 @@
+ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
diff --git a/nova/tests/unit/api/ec2/test_api.py b/nova/tests/unit/api/ec2/test_api.py
new file mode 100644
index 0000000000..cc4a2adb75
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_api.py
@@ -0,0 +1,635 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the API endpoint."""
+
+import random
+import re
+import StringIO
+
+import boto
+import boto.connection
+from boto.ec2 import regioninfo
+from boto import exception as boto_exc
+# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
+if hasattr(boto.connection, 'HTTPResponse'):
+ httplib = boto.connection
+else:
+ import httplib
+import fixtures
+import webob
+
+from nova.api import auth
+from nova.api import ec2
+from nova.api.ec2 import ec2utils
+from nova import block_device
+from nova import context
+from nova import exception
+from nova.openstack.common import versionutils
+from nova import test
+from nova.tests.unit import matchers
+
+
+class FakeHttplibSocket(object):
+ """a fake socket implementation for httplib.HTTPResponse, trivial."""
+ def __init__(self, response_string):
+ self.response_string = response_string
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """A fake httplib.HTTPConnection for boto to use
+
+ Requests made via this connection actually get translated and routed into
+ our WSGI app; we then wait for the response and turn it back into
+ the HTTPResponse that boto expects.
+ """
+ def __init__(self, app, host, is_secure=False):
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, data, headers):
+ req = webob.Request.blank(path)
+ req.method = method
+ req.body = data
+ req.headers = headers
+ req.headers['Accept'] = 'text/html'
+ req.host = self.host
+ # Call the WSGI app, get the HTTP response
+ resp = str(req.get_response(self.app))
+ # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
+ # guess that's a function the web server usually provides.
+ resp = "HTTP/1.0 %s" % resp
+ self.sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(self.sock)
+ # NOTE(vish): boto is accessing private variables for some reason
+ self._HTTPConnection__response = self.http_response
+ self.http_response.begin()
+
+ def getresponse(self):
+ return self.http_response
+
+ def getresponsebody(self):
+ return self.sock.response_string
+
+ def close(self):
+ """Required for compatibility with boto/tornado."""
+ pass
+
+
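
The FakeHttplibConnection docstring above is the key trick in this module: boto's HTTP requests are replayed in-process against the WSGI app instead of going over a real socket. A minimal, self-contained sketch of that routing with webob, using a toy WSGI app (toy_app is hypothetical, not nova's actual API pipeline):

    import webob

    def toy_app(environ, start_response):
        # Stand-in for the wrapped nova API pipeline.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    req = webob.Request.blank('/services/Cloud?Action=DescribeInstances')
    req.method = 'GET'
    resp = req.get_response(toy_app)  # runs the app in-process, no socket involved
    assert resp.status == '200 OK' and resp.body == b'hello'
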
+class XmlConversionTestCase(test.NoDBTestCase):
+ """Unit test api xml conversion."""
+ def test_number_conversion(self):
+ conv = ec2utils._try_convert
+ self.assertIsNone(conv('None'))
+ self.assertEqual(conv('True'), True)
+ self.assertEqual(conv('TRUE'), True)
+ self.assertEqual(conv('true'), True)
+ self.assertEqual(conv('False'), False)
+ self.assertEqual(conv('FALSE'), False)
+ self.assertEqual(conv('false'), False)
+ self.assertEqual(conv('0'), 0)
+ self.assertEqual(conv('42'), 42)
+ self.assertEqual(conv('3.14'), 3.14)
+ self.assertEqual(conv('-57.12'), -57.12)
+ self.assertEqual(conv('0x57'), 0x57)
+ self.assertEqual(conv('-0x57'), -0x57)
+ self.assertEqual(conv('-'), '-')
+ self.assertEqual(conv('-0'), 0)
+ self.assertEqual(conv('0.0'), 0.0)
+ self.assertEqual(conv('1e-8'), 0.0)
+ self.assertEqual(conv('-1e-8'), 0.0)
+ self.assertEqual(conv('0xDD8G'), '0xDD8G')
+ self.assertEqual(conv('0XDD8G'), '0XDD8G')
+ self.assertEqual(conv('-stringy'), '-stringy')
+ self.assertEqual(conv('stringy'), 'stringy')
+ self.assertEqual(conv('add'), 'add')
+ self.assertEqual(conv('remove'), 'remove')
+ self.assertEqual(conv(''), '')
+
+
+class Ec2utilsTestCase(test.NoDBTestCase):
+ def test_ec2_id_to_id(self):
+ self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
+ self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
+ self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
+ self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
+
+ def test_bad_ec2_id(self):
+ self.assertRaises(exception.InvalidEc2Id,
+ ec2utils.ec2_id_to_id,
+ 'badone')
+
+ def test_id_to_ec2_id(self):
+ self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
+ self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
+ self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
+ self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
+
+ def test_dict_from_dotted_str(self):
+ in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
+ ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
+ ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
+ ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
+ ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
+ ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
+ expected_dict = {
+ 'block_device_mapping': {
+ '1': {'device_name': '/dev/sda1',
+ 'ebs': {'snapshot_id': 'snap-0000001c',
+ 'volume_size': 80,
+ 'delete_on_termination': False}},
+ '2': {'device_name': '/dev/sdc',
+ 'virtual_name': 'ephemeral0'}}}
+ out_dict = ec2utils.dict_from_dotted_str(in_str)
+
+ self.assertThat(out_dict, matchers.DictMatches(expected_dict))
+
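
The dotted-parameter test above pins down how EC2 query keys like 'BlockDeviceMapping.1.Ebs.SnapshotId' become nested dicts with snake_case keys and coerced values. A rough illustrative version of that translation, written only to reproduce expected_dict above (dotted_pairs_to_dict is a hypothetical helper, not nova's ec2utils.dict_from_dotted_str):

    import re

    def _snake(name):
        # 'BlockDeviceMapping' -> 'block_device_mapping'
        return re.sub('([a-z])([A-Z])', r'\1_\2', name).lower()

    def _coerce(value):
        # Rough value coercion: booleans and integers, otherwise the raw string.
        if value.lower() in ('true', 'false'):
            return value.lower() == 'true'
        try:
            return int(value)
        except ValueError:
            return value

    def dotted_pairs_to_dict(pairs):
        result = {}
        for key, value in pairs:
            parts = [_snake(p) for p in key.split('.')]
            node = result
            for part in parts[:-1]:
                node = node.setdefault(part, {})
            node[parts[-1]] = _coerce(value)
        return result
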
+ def test_properties_root_device_name(self):
+ mappings = [{"device": "/dev/sda1", "virtual": "root"}]
+ properties0 = {'mappings': mappings}
+ properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
+
+ root_device_name = block_device.properties_root_device_name(
+ properties0)
+ self.assertEqual(root_device_name, '/dev/sda1')
+
+ root_device_name = block_device.properties_root_device_name(
+ properties1)
+ self.assertEqual(root_device_name, '/dev/sdb')
+
+ def test_regex_from_ec2_regex(self):
+ def _test_re(ec2_regex, expected, literal, match=True):
+ regex = ec2utils.regex_from_ec2_regex(ec2_regex)
+ self.assertEqual(regex, expected)
+ if match:
+ self.assertIsNotNone(re.match(regex, literal))
+ else:
+ self.assertIsNone(re.match(regex, literal))
+
+ # wildcards
+ _test_re('foo', '\Afoo\Z(?s)', 'foo')
+ _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
+ _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
+ _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar', match=False)
+ _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
+
+ # backslashes and escaped wildcards
+ _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
+ _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
+ _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
+ _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
+ _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
+ _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
+
+ # analog to the example given in the EC2 API docs
+ ec2_regex = '\*nova\?\\end'
+ expected = r'\A[*]nova[?]\\end\Z(?s)'
+ literal = r'*nova?\end'
+ _test_re(ec2_regex, expected, literal)
+
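
The cases above specify how EC2 filter strings ('*' and '?' wildcards, backslash escapes) map to anchored Python regexes. A hedged sketch of such a translation, derived only from these expectations rather than from nova's ec2utils (ec2_filter_to_regex is a hypothetical name):

    import re

    def ec2_filter_to_regex(ec2_filter):
        out = []
        i = 0
        while i < len(ec2_filter):
            c = ec2_filter[i]
            if c == '\\' and i + 1 < len(ec2_filter):
                # Backslash escapes the next character; escaped wildcards
                # become literal character classes.
                nxt = ec2_filter[i + 1]
                out.append('[%s]' % nxt if nxt in '*?' else re.escape(nxt))
                i += 2
            elif c == '*':
                out.append('.*')
                i += 1
            elif c == '?':
                out.append('.')
                i += 1
            else:
                out.append(re.escape(c))
                i += 1
        return r'\A%s\Z(?s)' % ''.join(out)
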
+ def test_mapping_prepend_dev(self):
+ mappings = [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': 'sdb1'},
+ {'virtual': 'swap',
+ 'device': '/dev/sdb2'},
+
+ {'virtual': 'ephemeral0',
+ 'device': 'sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': '/dev/sdc1'}]
+ expected_result = [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': '/dev/sdb1'},
+ {'virtual': 'swap',
+ 'device': '/dev/sdb2'},
+
+ {'virtual': 'ephemeral0',
+ 'device': '/dev/sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': '/dev/sdc1'}]
+ self.assertThat(block_device.mappings_prepend_dev(mappings),
+ matchers.DictListMatches(expected_result))
+
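
test_mapping_prepend_dev above checks that bare device names for swap and ephemeral entries gain a '/dev/' prefix while 'ami' and 'root' entries are left untouched. A small illustrative version of that normalization (an assumption based on the expected result, not nova's block_device code):

    def prepend_dev(mappings):
        result = []
        for m in mappings:
            m = dict(m)  # do not mutate the caller's mappings
            if (m['virtual'] not in ('ami', 'root')
                    and not m['device'].startswith('/dev/')):
                m['device'] = '/dev/' + m['device']
            result.append(m)
        return result
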
+
+class ApiEc2TestCase(test.TestCase):
+ """Unit test for the cloud controller on an EC2 API."""
+ def setUp(self):
+ super(ApiEc2TestCase, self).setUp()
+ self.host = '127.0.0.1'
+ # NOTE(vish): skipping the Authorizer
+ roles = ['sysadmin', 'netadmin']
+ ctxt = context.RequestContext('fake', 'fake', roles=roles)
+ self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
+ ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
+ ), 'nova.api.ec2.cloud.CloudController'))))
+ self.useFixture(fixtures.FakeLogger('boto'))
+
+ def expect_http(self, host=None, is_secure=False, api_version=None):
+ """Returns a new EC2 connection."""
+ self.ec2 = boto.connect_ec2(
+ aws_access_key_id='fake',
+ aws_secret_access_key='fake',
+ is_secure=False,
+ region=regioninfo.RegionInfo(None, 'test', self.host),
+ port=8773,
+ path='/services/Cloud')
+ if api_version:
+ self.ec2.APIVersion = api_version
+
+ self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
+ self.http = FakeHttplibConnection(
+ self.app, '%s:8773' % (self.host), False)
+ # pylint: disable=E1103
+ if versionutils.is_compatible('2.14', boto.Version, same_major=False):
+ self.ec2.new_http_connection(host or self.host, 8773,
+ is_secure).AndReturn(self.http)
+ elif versionutils.is_compatible('2', boto.Version, same_major=False):
+ self.ec2.new_http_connection(host or '%s:8773' % (self.host),
+ is_secure).AndReturn(self.http)
+ else:
+ self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
+ return self.http
+
+ def test_xmlns_version_matches_request_version(self):
+ self.expect_http(api_version='2010-10-30')
+ self.mox.ReplayAll()
+
+ # Any request should be fine
+ self.ec2.get_all_instances()
+ self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
+ 'The version in the xmlns of the response does '
+ 'not match the API version given in the request.')
+
+ def test_describe_instances(self):
+ """Test that, after creating a user and a project, the describe
+ instances call to the API works properly.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.assertEqual(self.ec2.get_all_instances(), [])
+
+ def test_terminate_invalid_instance(self):
+ # Attempt to terminate an invalid instance.
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.terminate_instances, "i-00000005")
+
+ def test_get_all_key_pairs(self):
+ """Test that, after creating a user and project and generating
+ a key pair, the API call to list key pairs works properly.
+ """
+ keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.create_key_pair(keyname)
+ rv = self.ec2.get_all_key_pairs()
+ results = [k for k in rv if k.name == keyname]
+ self.assertEqual(len(results), 1)
+
+ def test_create_duplicate_key_pair(self):
+ """Test that, after successfully generating a keypair,
+ requesting a second keypair with the same name fails sanely.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.create_key_pair('test')
+
+ try:
+ self.ec2.create_key_pair('test')
+ except boto_exc.EC2ResponseError as e:
+ if e.code == 'InvalidKeyPair.Duplicate':
+ pass
+ else:
+ self.assertEqual('InvalidKeyPair.Duplicate', e.code)
+ else:
+ self.fail('Exception not raised.')
+
+ def test_get_all_security_groups(self):
+ # Test that we can retrieve security groups.
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_create_delete_security_group(self):
+ # Test that we can create a security group.
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ self.assertEqual(len(rv), 2)
+ self.assertIn(security_group_name, [group.name for group in rv])
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ def test_group_name_valid_chars_security_group(self):
+ """Test that we sanely handle invalid security group names.
+
+ EC2 API Spec states we should only accept alphanumeric characters,
+ spaces, dashes, and underscores. The Amazon implementation
+ accepts more characters, so [:print:] is OK.
+ """
+ bad_strict_ec2 = "aa \t\x01\x02\x7f"
+ bad_amazon_ec2 = "aa #^% -=99"
+ test_raise = [
+ (True, bad_amazon_ec2, "test desc"),
+ (True, "test name", bad_amazon_ec2),
+ (False, bad_strict_ec2, "test desc"),
+ ]
+ for t in test_raise:
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.flags(ec2_strict_validation=t[0])
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.create_security_group,
+ t[1],
+ t[2])
+ test_accept = [
+ (False, bad_amazon_ec2, "test desc"),
+ (False, "test name", bad_amazon_ec2),
+ ]
+ for t in test_accept:
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.flags(ec2_strict_validation=t[0])
+ self.ec2.create_security_group(t[1], t[2])
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.delete_security_group(t[1])
+
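
The docstring of test_group_name_valid_chars_security_group describes two validation regimes: a strict one per the EC2 spec (alphanumerics, spaces, dashes, underscores) and a relaxed one matching Amazon's observed behaviour (any printable characters). A hedged sketch of such a check, assuming a hypothetical is_valid_group_name helper rather than nova's actual validation path:

    import re

    def is_valid_group_name(name, strict=True):
        # Strict mode follows the EC2 spec wording; relaxed mode accepts
        # any printable ASCII plus spaces.
        if strict:
            return bool(re.match(r'^[a-zA-Z0-9 _\-]+$', name))
        return bool(name) and all(c == ' ' or 33 <= ord(c) <= 126 for c in name)

    assert not is_valid_group_name("aa #^% -=99", strict=True)         # bad_amazon_ec2
    assert is_valid_group_name("aa #^% -=99", strict=False)
    assert not is_valid_group_name("aa \t\x01\x02\x7f", strict=False)  # bad_strict_ec2
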
+ def test_group_name_valid_length_security_group(self):
+ """Test that we sanely handle invalid security group names.
+
+ API Spec states that the length should not exceed 255 characters.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ # Test block group_name > 255 chars
+ security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
+ for x in range(random.randint(256, 266)))
+
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.create_security_group,
+ security_group_name,
+ 'test group')
+
+ def test_authorize_revoke_security_group_cidr(self):
+ """Test that we can add and remove CIDR based rules
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '0.0.0.0/0')
+ group.authorize('icmp', -1, -1, '0.0.0.0/0')
+ group.authorize('udp', 80, 81, '0.0.0.0/0')
+ group.authorize('tcp', 1, 65535, '0.0.0.0/0')
+ group.authorize('udp', 1, 65535, '0.0.0.0/0')
+ group.authorize('icmp', 1, 0, '0.0.0.0/0')
+ group.authorize('icmp', 0, 1, '0.0.0.0/0')
+ group.authorize('icmp', 0, 0, '0.0.0.0/0')
+
+ def _assert(message, *args):
+ try:
+ group.authorize(*args)
+ except boto_exc.EC2ResponseError as e:
+ self.assertEqual(e.status, 400, 'Expected status to be 400')
+ self.assertIn(message, e.error_message)
+ else:
+ raise self.failureException, 'EC2ResponseError not raised'
+
+ # Invalid CIDR address
+ _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
+ # Missing ports
+ _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
+ # from port cannot be greater than to port
+ _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
+ # For tcp, negative values are not allowed
+ _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
+ # For tcp, valid port range 1-65535
+ _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
+ # Invalid Cidr for ICMP type
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
+ # Invalid protocol
+ _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
+ # Invalid port
+ _assert('Invalid input received: To and From ports must be integers',
+ 'tcp', " ", "81", '0.0.0.0/0')
+ # Invalid icmp port
+ _assert('Invalid input received: '
+ 'Type and Code must be integers for ICMP protocol type',
+ 'icmp', " ", "81", '0.0.0.0/0')
+ # Invalid CIDR Address
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
+ # Invalid CIDR Address
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
+ # Invalid Cidr ports
+ _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+
+ self.assertEqual(len(group.rules), 8)
+ self.assertEqual(int(group.rules[0].from_port), 80)
+ self.assertEqual(int(group.rules[0].to_port), 81)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '0.0.0.0/0')
+ group.revoke('icmp', -1, -1, '0.0.0.0/0')
+ group.revoke('udp', 80, 81, '0.0.0.0/0')
+ group.revoke('tcp', 1, 65535, '0.0.0.0/0')
+ group.revoke('udp', 1, 65535, '0.0.0.0/0')
+ group.revoke('icmp', 1, 0, '0.0.0.0/0')
+ group.revoke('icmp', 0, 1, '0.0.0.0/0')
+ group.revoke('icmp', 0, 0, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_authorize_revoke_security_group_cidr_v6(self):
+ """Test that we can add and remove CIDR based rules
+ to a security group for IPv6
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+ self.assertEqual(len(group.rules), 1)
+ self.assertEqual(int(group.rules[0].from_port), 80)
+ self.assertEqual(int(group.rules[0].to_port), 81)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]), '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_authorize_revoke_security_group_foreign_group(self):
+ """Test that we can grant and revoke another security group access
+ to a security group.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rand_string = 'sdiuisudfsdcnpaqwertasd'
+ security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ other_group = self.ec2.create_security_group(other_security_group_name,
+ 'some other group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEqual(len(group.rules), 3)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]),
+ '%s-%s' % (other_security_group_name, 'fake'))
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ for group in rv:
+ if group.name == security_group_name:
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+ group.revoke(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+ self.ec2.delete_security_group(other_security_group_name)
diff --git a/nova/tests/unit/api/ec2/test_apirequest.py b/nova/tests/unit/api/ec2/test_apirequest.py
new file mode 100644
index 0000000000..4b2dee96f8
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_apirequest.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the API Request internals."""
+
+import copy
+
+from oslo.utils import timeutils
+
+from nova.api.ec2 import apirequest
+from nova import test
+
+
+class APIRequestTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(APIRequestTestCase, self).setUp()
+ self.req = apirequest.APIRequest("FakeController", "FakeAction",
+ "FakeVersion", {})
+ self.resp = {
+ 'string': 'foo',
+ 'int': 1,
+ 'long': long(1),
+ 'bool': False,
+ 'dict': {
+ 'string': 'foo',
+ 'int': 1,
+ }
+ }
+
+ # The response dict above will produce output that looks like the
+ # following (excusing line wrap for 80 cols):
+ #
+ # <FakeActionResponse xmlns="http://ec2.amazonaws.com/doc/\
+ # FakeVersion/">
+ # <requestId>uuid</requestId>
+ # <int>1</int>
+ # <dict>
+ # <int>1</int>
+ # <string>foo</string>
+ # </dict>
+ # <bool>false</bool>
+ # <string>foo</string>
+ # </FakeActionResponse>
+ #
+ # We don't attempt to ever test for the full document because
+ # hash seed order might impact its rendering order. The fact
+ # that running the function doesn't explode is a big part of
+ # the win.
+
+ def test_render_response_ascii(self):
+ data = self.req._render_response(self.resp, 'uuid')
+ self.assertIn('<FakeActionResponse xmlns="http://ec2.amazonaws.com/'
+ 'doc/FakeVersion/', data)
+ self.assertIn('<int>1</int>', data)
+ self.assertIn('<string>foo</string>', data)
+
+ def test_render_response_utf8(self):
+ resp = copy.deepcopy(self.resp)
+ resp['utf8'] = unichr(40960) + u'abcd' + unichr(1972)
+ data = self.req._render_response(resp, 'uuid')
+ self.assertIn('<utf8>&#40960;abcd&#1972;</utf8>', data)
+
+ # Tests for individual data element format functions
+
+ def test_return_valid_isoformat(self):
+ """Ensure that the ec2 api returns datetime in xs:dateTime
+ (which apparently isn't datetime.isoformat())
+ NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
+ """
+ conv = apirequest._database_to_isoformat
+ # sqlite database representation with microseconds
+ time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
+ "%Y-%m-%d %H:%M:%S.%f")
+ self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
+ # mysql database representation (no microseconds)
+ time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
+ "%Y-%m-%d %H:%M:%S")
+ self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
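
The assertions above fix the output shape: millisecond precision plus a literal 'Z' suffix. A small sketch reproducing that behaviour (to_xs_datetime is illustrative only, not apirequest's actual _database_to_isoformat):

    from datetime import datetime

    def to_xs_datetime(dt):
        # Truncate microseconds to milliseconds and append the UTC 'Z' designator.
        return '%s.%03dZ' % (dt.strftime('%Y-%m-%dT%H:%M:%S'),
                             dt.microsecond // 1000)

    assert to_xs_datetime(datetime(2011, 2, 21, 20, 14, 10, 634276)) == \
        '2011-02-21T20:14:10.634Z'
    assert to_xs_datetime(datetime(2011, 2, 21, 19, 56, 18)) == \
        '2011-02-21T19:56:18.000Z'
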
diff --git a/nova/tests/unit/api/ec2/test_cinder_cloud.py b/nova/tests/unit/api/ec2/test_cinder_cloud.py
new file mode 100644
index 0000000000..78db126aee
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_cinder_cloud.py
@@ -0,0 +1,1096 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import fixtures
+from oslo.config import cfg
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import volume
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_flavor', 'nova.compute.flavors')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+
+def get_fake_cache():
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip('192.168.0.3',
+ floats=['1.2.3.4',
+ '5.6.7.8']),
+ _ip('192.168.0.4')]}]}}]
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+ return info
+
+
+def get_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+ if isinstance(instances, list):
+ for instance in instances:
+ instance['info_cache'] = {'network_info': get_fake_cache()}
+ else:
+ instances['info_cache'] = {'network_info': get_fake_cache()}
+ return instances
+
+
+class CinderCloudTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CinderCloudTestCase, self).setUp()
+ ec2utils.reset_cache()
+ self.useFixture(fixtures.TempDir()).path
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.unit.fake_volume.API')
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.consoleauth = self.start_service('consoleauth')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.volume_api = volume.API()
+ self.volume_api.reset_fake_api(self.context)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ self.volume_api.reset_fake_api(self.context)
+ super(CinderCloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def _stub_instance_get_with_fixed_ips(self, func_name):
+ orig_func = getattr(self.cloud.compute_api, func_name)
+
+ def fake_get(*args, **kwargs):
+ return get_instances_with_cached_ips(orig_func, *args, **kwargs)
+ self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
+
+ def _create_key(self, name):
+ # NOTE(vish): create depends on pool, so just call helper directly
+ keypair_api = compute_api.KeypairAPI()
+ return keypair_api.create_key_pair(self.context, self.context.user_id,
+ name)
+
+ def test_describe_volumes(self):
+ # Makes sure describe_volumes works and filters results.
+
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-1',
+ description='test volume 1')
+ self.assertEqual(vol1['status'], 'available')
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-2',
+ description='test volume 2')
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ result = self.cloud.describe_volumes(self.context,
+ [vol1['volumeId']])
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
+
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+ self.cloud.delete_volume(self.context, vol2['volumeId'])
+
+ def test_format_volume_maps_status(self):
+ fake_volume = {'id': 1,
+ 'status': 'creating',
+ 'availability_zone': 'nova',
+ 'volumeId': 'vol-0000000a',
+ 'attachmentSet': [{}],
+ 'snapshotId': None,
+ 'created_at': '2013-04-18T06:03:35.025626',
+ 'size': 1,
+ 'mountpoint': None,
+ 'attach_status': None}
+
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'creating')
+
+ fake_volume['status'] = 'attaching'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'in-use')
+ fake_volume['status'] = 'detaching'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'in-use')
+ fake_volume['status'] = 'banana'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'banana')
+
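
test_format_volume_maps_status above asserts that 'attaching' and 'detaching' are reported as 'in-use' while unrecognised statuses pass through unchanged; test_format_snapshot_maps_status further down follows the same pattern with a richer table. A minimal sketch of the volume mapping (illustrative names, not cloud._format_volume):

    _EC2_VOLUME_STATUS = {'attaching': 'in-use', 'detaching': 'in-use'}

    def ec2_volume_status(cinder_status):
        # Unknown statuses ('creating', 'banana', ...) are passed through as-is.
        return _EC2_VOLUME_STATUS.get(cinder_status, cinder_status)
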
+ def test_create_volume_in_availability_zone(self):
+ """Makes sure create_volume works when we specify an availability
+ zone
+ """
+ availability_zone = 'zone1:host1'
+
+ result = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ volume_id = result['volumeId']
+ availabilityZone = result['availabilityZone']
+ self.assertEqual(availabilityZone, availability_zone)
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
+ self.assertEqual(result['volumeSet'][0]['availabilityZone'],
+ availabilityZone)
+
+ self.cloud.delete_volume(self.context, volume_id)
+
+ def test_create_volume_from_snapshot(self):
+ # Makes sure create_volume works when we specify a snapshot.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap of vol %s'
+ % vol1['volumeId'])
+
+ vol2 = self.cloud.create_volume(self.context,
+ snapshot_id=snap['snapshotId'])
+ volume1_id = vol1['volumeId']
+ volume2_id = vol2['volumeId']
+
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
+
+ self.cloud.delete_volume(self.context, volume2_id)
+ self.cloud.delete_snapshot(self.context, snap['snapshotId'])
+ self.cloud.delete_volume(self.context, volume1_id)
+
+ def test_volume_status_of_attaching_volume(self):
+ """Test the volume's status in response when attaching a volume."""
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-ls',
+ description='test volume ls')
+ self.assertEqual('available', vol1['status'])
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+ resp = self.cloud.attach_volume(self.context,
+ vol1['volumeId'],
+ ec2_instance_id,
+ '/dev/sde')
+ # Here, the status should be 'attaching', but it can be 'attached' in
+ # a unit test scenario if the attach action is very fast.
+ self.assertIn(resp['status'], ('attaching', 'attached'))
+
+ def test_volume_status_of_detaching_volume(self):
+ """Test the volume's status in response when detaching a volume."""
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-ls',
+ description='test volume ls')
+ self.assertEqual('available', vol1['status'])
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': True}]}
+ self._run_instance(**kwargs)
+ resp = self.cloud.detach_volume(self.context,
+ vol1['volumeId'])
+
+ # Here, the status should be 'detaching', but it can be 'detached' in
+ # a unit test scenario if the detach action is very fast.
+ self.assertIn(resp['status'], ('detaching', 'detached'))
+
+ def test_describe_snapshots(self):
+ # Makes sure describe_snapshots works and filters results.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+ snap2 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap2 of vol %s' %
+ vol1['volumeId'])
+
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 2)
+ result = self.cloud.describe_snapshots(
+ self.context,
+ snapshot_id=[snap2['snapshotId']])
+ self.assertEqual(len(result['snapshotSet']), 1)
+
+ self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
+ self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def test_format_snapshot_maps_status(self):
+ fake_snapshot = {'status': 'new',
+ 'id': 1,
+ 'volume_id': 1,
+ 'created_at': 1353560191.08117,
+ 'progress': 90,
+ 'project_id': str(uuid.uuid4()),
+ 'volume_size': 10000,
+ 'display_description': 'desc'}
+
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'creating'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'available'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'active'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'deleting'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'deleted'
+ self.assertIsNone(self.cloud._format_snapshot(self.context,
+ fake_snapshot))
+
+ fake_snapshot['status'] = 'error'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'error')
+
+ fake_snapshot['status'] = 'banana'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'banana')
+
+ def test_create_snapshot(self):
+ # Makes sure create_snapshot works.
+ availability_zone = 'zone1:host1'
+ result = self.cloud.describe_snapshots(self.context)
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+
+ snapshot_id = snap1['snapshotId']
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 1)
+ self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+ self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def test_delete_snapshot(self):
+ # Makes sure delete_snapshot works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+
+ snapshot_id = snap1['snapshotId']
+ result = self.cloud.delete_snapshot(self.context,
+ snapshot_id=snapshot_id)
+ self.assertTrue(result)
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def _block_device_mapping_create(self, instance_uuid, mappings):
+ volumes = []
+ for bdm in mappings:
+ db.block_device_mapping_create(self.context, bdm)
+ if 'volume_id' in bdm:
+ values = {'id': bdm['volume_id']}
+ for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
+ ('snapshot_size', 'volume_size'),
+ ('delete_on_termination',
+ 'delete_on_termination')]:
+ if bdm_key in bdm:
+ values[vol_key] = bdm[bdm_key]
+ kwargs = {'name': 'bdmtest-volume',
+ 'description': 'bdm test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached',
+ 'volume_id': values['id']}
+ vol = self.volume_api.create_with_kwargs(self.context,
+ **kwargs)
+ if 'snapshot_id' in values:
+ self.volume_api.create_snapshot(self.context,
+ vol['id'],
+ 'snapshot-bdm',
+ 'fake snap for bdm tests',
+ values['snapshot_id'])
+
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, bdm['device_name'])
+ volumes.append(vol)
+ return volumes
+
+ def _setUpBlockDeviceMapping(self):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst0 = db.instance_create(self.context,
+ {'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/sdb1',
+ 'system_metadata': sys_meta})
+ inst1 = db.instance_create(self.context,
+ {'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/sdc1',
+ 'system_metadata': sys_meta})
+ inst2 = db.instance_create(self.context,
+ {'image_ref': '',
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/vda',
+ 'system_metadata': sys_meta})
+
+ instance0_uuid = inst0['uuid']
+ mappings0 = [
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb1',
+ 'snapshot_id': '1',
+ 'volume_id': '2'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb2',
+ 'volume_id': '3',
+ 'volume_size': 1},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb3',
+ 'delete_on_termination': True,
+ 'snapshot_id': '4',
+ 'volume_id': '5'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb4',
+ 'delete_on_termination': False,
+ 'snapshot_id': '6',
+ 'volume_id': '7'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb5',
+ 'snapshot_id': '8',
+ 'volume_id': '9',
+ 'volume_size': 0},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb6',
+ 'snapshot_id': '10',
+ 'volume_id': '11',
+ 'volume_size': 1},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb7',
+ 'no_device': True},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb8',
+ 'virtual_name': 'swap'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb9',
+ 'virtual_name': 'ephemeral3'}]
+ instance2_uuid = inst2['uuid']
+ mappings2 = [
+ {'instance_uuid': instance2_uuid,
+ 'device_name': 'vda',
+ 'snapshot_id': '1',
+ 'volume_id': '21'}]
+
+ volumes0 = self._block_device_mapping_create(instance0_uuid, mappings0)
+ volumes2 = self._block_device_mapping_create(instance2_uuid, mappings2)
+ return ((inst0, inst1, inst2), (volumes0, [], volumes2))
+
+ def _tearDownBlockDeviceMapping(self, instances, volumes):
+ for vols in volumes:
+ for vol in vols:
+ self.volume_api.delete(self.context, vol['id'])
+ for instance in instances:
+ for bdm in db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid']):
+ db.block_device_mapping_destroy(self.context, bdm['id'])
+ db.instance_destroy(self.context, instance['uuid'])
+
+ _expected_instance_bdm0 = {
+ 'instanceId': 'i-00000001',
+ 'rootDeviceName': '/dev/sdb1',
+ 'rootDeviceType': 'ebs'}
+
+ _expected_block_device_mapping0 = [
+ {'deviceName': '/dev/sdb1',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000002',
+ }},
+ {'deviceName': '/dev/sdb2',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000003',
+ }},
+ {'deviceName': '/dev/sdb3',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': True,
+ 'volumeId': 'vol-00000005',
+ }},
+ {'deviceName': '/dev/sdb4',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000007',
+ }},
+ {'deviceName': '/dev/sdb5',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000009',
+ }},
+ {'deviceName': '/dev/sdb6',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-0000000b', }}]
+ # NOTE(yamahata): swap/ephemeral device case isn't supported yet.
+
+ _expected_instance_bdm1 = {
+ 'instanceId': 'i-00000002',
+ 'rootDeviceName': '/dev/sdc1',
+ 'rootDeviceType': 'instance-store'}
+
+ _expected_instance_bdm2 = {
+ 'instanceId': 'i-00000003',
+ 'rootDeviceName': '/dev/vda',
+ 'rootDeviceType': 'ebs'}
+
+ def test_format_instance_bdm(self):
+ (instances, volumes) = self._setUpBlockDeviceMapping()
+
+ result = {}
+ self.cloud._format_instance_bdm(self.context, instances[0]['uuid'],
+ '/dev/sdb1', result)
+ self.assertThat(
+ {'rootDeviceType': self._expected_instance_bdm0['rootDeviceType']},
+ matchers.IsSubDictOf(result))
+ self._assertEqualBlockDeviceMapping(
+ self._expected_block_device_mapping0, result['blockDeviceMapping'])
+
+ result = {}
+ self.cloud._format_instance_bdm(self.context, instances[1]['uuid'],
+ '/dev/sdc1', result)
+ self.assertThat(
+ {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
+ matchers.IsSubDictOf(result))
+
+ self._tearDownBlockDeviceMapping(instances, volumes)
+
+ def _assertInstance(self, instance_id):
+ ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[ec2_instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ result = result['instancesSet'][0]
+ self.assertEqual(result['instanceId'], ec2_instance_id)
+ return result
+
+ def _assertEqualBlockDeviceMapping(self, expected, result):
+ self.assertEqual(len(expected), len(result))
+ for x in expected:
+ found = False
+ for y in result:
+ if x['deviceName'] == y['deviceName']:
+ self.assertThat(x, matchers.IsSubDictOf(y))
+ found = True
+ break
+ self.assertTrue(found)
+
+ def test_describe_instances_bdm(self):
+ """Make sure describe_instances works with root_device_name and
+ block device mappings
+ """
+ (instances, volumes) = self._setUpBlockDeviceMapping()
+
+ result = self._assertInstance(instances[0]['id'])
+ self.assertThat(
+ self._expected_instance_bdm0,
+ matchers.IsSubDictOf(result))
+ self._assertEqualBlockDeviceMapping(
+ self._expected_block_device_mapping0, result['blockDeviceMapping'])
+
+ result = self._assertInstance(instances[1]['id'])
+ self.assertThat(
+ self._expected_instance_bdm1,
+ matchers.IsSubDictOf(result))
+
+ result = self._assertInstance(instances[2]['id'])
+ self.assertThat(
+ self._expected_instance_bdm2,
+ matchers.IsSubDictOf(result))
+
+ self._tearDownBlockDeviceMapping(instances, volumes)
+
+ def _setUpImageSet(self, create_volumes_and_snapshots=False):
+ self.flags(max_local_block_devices=-1)
+ mappings1 = [
+ {'device': '/dev/sda1', 'virtual': 'root'},
+
+ {'device': 'sdb0', 'virtual': 'ephemeral0'},
+ {'device': 'sdb1', 'virtual': 'ephemeral1'},
+ {'device': 'sdb2', 'virtual': 'ephemeral2'},
+ {'device': 'sdb3', 'virtual': 'ephemeral3'},
+ {'device': 'sdb4', 'virtual': 'ephemeral4'},
+
+ {'device': 'sdc0', 'virtual': 'swap'},
+ {'device': 'sdc1', 'virtual': 'swap'},
+ {'device': 'sdc2', 'virtual': 'swap'},
+ {'device': 'sdc3', 'virtual': 'swap'},
+ {'device': 'sdc4', 'virtual': 'swap'}]
+ block_device_mapping1 = [
+ {'device_name': '/dev/sdb1', 'snapshot_id': 1234567},
+ {'device_name': '/dev/sdb2', 'volume_id': 1234567},
+ {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
+ {'device_name': '/dev/sdb4', 'no_device': True},
+
+ {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
+ {'device_name': '/dev/sdc2', 'volume_id': 12345678},
+ {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
+ {'device_name': '/dev/sdc4', 'no_device': True}]
+ image1 = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available',
+ 'mappings': mappings1,
+ 'block_device_mapping': block_device_mapping1,
+ }
+ }
+
+ mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
+ block_device_mapping2 = [{'device_name': '/dev/sdb1',
+ 'snapshot_id': 1234567}]
+ image2 = {
+ 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'type': 'machine',
+ 'root_device_name': '/dev/sdb1',
+ 'mappings': mappings2,
+ 'block_device_mapping': block_device_mapping2}}
+
+ def fake_show(meh, context, image_id, **kwargs):
+ _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
+ for i in _images:
+ if str(i['id']) == str(image_id):
+ return i
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def fake_detail(meh, context):
+ return [copy.deepcopy(image1), copy.deepcopy(image2)]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+ volumes = []
+ snapshots = []
+ if create_volumes_and_snapshots:
+ for bdm in block_device_mapping1:
+ if 'volume_id' in bdm:
+ vol = self._volume_create(bdm['volume_id'])
+ volumes.append(vol['id'])
+ if 'snapshot_id' in bdm:
+ kwargs = {'volume_id': 76543210,
+ 'volume_size': 1,
+ 'name': 'test-snap',
+ 'description': 'test snap desc',
+ 'snap_id': bdm['snapshot_id'],
+ 'status': 'available'}
+ snap = self.volume_api.create_snapshot_with_kwargs(
+ self.context, **kwargs)
+ snapshots.append(snap['id'])
+ return (volumes, snapshots)
+
+ def _assertImageSet(self, result, root_device_type, root_device_name):
+ self.assertEqual(1, len(result['imagesSet']))
+ result = result['imagesSet'][0]
+ self.assertIn('rootDeviceType', result)
+ self.assertEqual(result['rootDeviceType'], root_device_type)
+ self.assertIn('rootDeviceName', result)
+ self.assertEqual(result['rootDeviceName'], root_device_name)
+ self.assertIn('blockDeviceMapping', result)
+
+ return result
+
+ _expected_root_device_name1 = '/dev/sda1'
+ # NOTE(yamahata): noDevice doesn't make sense when returning a mapping.
+ # It makes sense only when the user is overriding an
+ # existing mapping.
+ _expected_bdms1 = [
+ {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
+ 'snap-00053977'}},
+ {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
+ 'vol-00053977'}},
+ {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
+
+ {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
+ 'snap-00bc614e'}},
+ {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
+ 'vol-00bc614e'}},
+ {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
+ ]
+
+ _expected_root_device_name2 = '/dev/sdb1'
+ _expected_bdms2 = [{'deviceName': '/dev/sdb1',
+ 'ebs': {'snapshotId': 'snap-00053977'}}]
+
+ def _run_instance(self, **kwargs):
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def _restart_compute_service(self, periodic_interval_max=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval_max:
+ self.compute = self.start_service(
+ 'compute', periodic_interval_max=periodic_interval_max)
+ else:
+ self.compute = self.start_service('compute')
+
+ def _volume_create(self, volume_id=None):
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached'}
+ if volume_id:
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['mountpoint'], mountpoint)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ def _assert_volume_detached(self, vol):
+ self.assertIsNone(vol['instance_uuid'])
+ self.assertIsNone(vol['mountpoint'])
+ self.assertEqual(vol['status'], "available")
+ self.assertEqual(vol['attach_status'], "detached")
+
+ def test_stop_start_with_volume(self):
+ # Make sure running an instance with a block device mapping works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
+ # make periodic tasks run at a short interval to avoid waiting for 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc',
+ 'volume_id': vol2_uuid,
+ 'delete_on_termination': True},
+ ]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
+ if str(vol['id']) == str(vol1_uuid):
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, '/dev/sdb')
+ elif str(vol['id']) == str(vol2_uuid):
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, '/dev/sdc')
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
+ self.assertIn(vol['mountpoint'], ['/dev/sdb', '/dev/sdc'])
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ # Here we puke...
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ vol = self.volume_api.get(admin_ctxt, vol2_uuid)
+ self.assertFalse(vol['deleted'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+ self._restart_compute_service()
+
+ def test_stop_with_attached_volume(self):
+ # Make sure attach info is reflected in the block device mapping.
+
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
+
+ # make periodic tasks run at a short interval to avoid waiting for 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_detached(vol)
+
+ inst_obj = objects.Instance.get_by_uuid(self.context, instance_uuid)
+ self.cloud.compute_api.attach_volume(self.context,
+ inst_obj,
+ volume_id=vol2_uuid,
+ device='/dev/sdc')
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol1, instance_uuid, '/dev/sdb')
+
+ vol2 = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
+
+ self.cloud.compute_api.detach_volume(self.context,
+ inst_obj, vol1)
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_detached(vol1)
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+
+ vol2 = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 1)
+
+ self._assert_volume_detached(vol1)
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_detached(vol1)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ return result['snapshotId']
+
+ def test_run_with_snapshot(self):
+        # Makes sure running an instance with snapshot-backed block devices works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap of vol %s' %
+ vol1['volumeId'])
+ snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
+
+ snap2 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-2',
+ description='test snap of vol %s' %
+ vol1['volumeId'])
+ snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'snapshot_id': snap1_uuid,
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'snapshot_id': snap2_uuid,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+
+ self.assertEqual(len(vols), 2)
+
+ vol1_id = None
+ vol2_id = None
+ for vol in vols:
+ snapshot_uuid = vol['snapshot_id']
+ if snapshot_uuid == snap1_uuid:
+ vol1_id = vol['id']
+ mountpoint = '/dev/vdb'
+ elif snapshot_uuid == snap2_uuid:
+ vol2_id = vol['id']
+ mountpoint = '/dev/vdc'
+ else:
+ self.fail()
+
+ self._assert_volume_attached(vol, instance_uuid, mountpoint)
+
+ # Just make sure we found them
+ self.assertTrue(vol1_id)
+ self.assertTrue(vol2_id)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ vol = self.volume_api.get(admin_ctxt, vol1_id)
+ self._assert_volume_detached(vol)
+ self.assertFalse(vol['deleted'])
+
+ def test_create_image(self):
+ # Make sure that CreateImage works.
+        # Force periodic tasks to run frequently so the test doesn't wait 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ self._restart_compute_service()
+
+ @staticmethod
+ def _fake_bdm_get(ctxt, id):
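+        # Fake block device mappings: a volume-backed entry, a snapshot-backed
+        # entry, a no_device entry, plus ephemeral and swap virtual devices.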
+ return [{'volume_id': 87654321,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'},
+ {'volume_id': None,
+ 'snapshot_id': 98765432,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdi'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': True,
+ 'virtual_name': None,
+ 'delete_on_termination': None,
+ 'device_name': None},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral0',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral1',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdd'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral2',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sd3'},
+ ]
diff --git a/nova/tests/unit/api/ec2/test_cloud.py b/nova/tests/unit/api/ec2/test_cloud.py
new file mode 100644
index 0000000000..113af8c96c
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_cloud.py
@@ -0,0 +1,3255 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import copy
+import datetime
+import functools
+import os
+import string
+import tempfile
+
+import fixtures
+import iso8601
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.api.ec2 import inst_state
+from nova.api.metadata import password
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import s3
+from nova.network import api as network_api
+from nova.network import base_api as base_network_api
+from nova.network import model
+from nova.network import neutronv2
+from nova import objects
+from nova.objects import base as obj_base
+from nova.openstack.common import log as logging
+from nova.openstack.common import policy as common_policy
+from nova.openstack.common import uuidutils
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import (
+ test_neutron_security_groups as test_neutron)
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import utils
+from nova.virt import fake as fake_virt
+from nova import volume
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_flavor', 'nova.compute.flavors')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+LOG = logging.getLogger(__name__)
+
+HOST = "testhost"
+
+
+def get_fake_cache(get_floating):
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
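+    # Two fixed IPs on a single NIC; the first optionally carries floating
+    # IPs.  The result is hydrated into a NetworkInfo model below.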
+ if get_floating:
+ ip_info = [_ip('192.168.0.3',
+ floats=['1.2.3.4', '5.6.7.8']),
+ _ip('192.168.0.4')]
+ else:
+ ip_info = [_ip('192.168.0.3'),
+ _ip('192.168.0.4')]
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': ip_info}]}}]
+
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+
+ return model.NetworkInfo.hydrate(info)
+
+
+def get_instances_with_cached_ips(orig_func, get_floating,
+ *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+
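+    # Build the fake cache either as an InstanceInfoCache object or a plain
+    # dict, depending on whether the caller asked for objects.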
+ if kwargs.get('want_objects', False):
+ info_cache = objects.InstanceInfoCache()
+ info_cache.network_info = get_fake_cache(get_floating)
+ info_cache.obj_reset_changes()
+ else:
+ info_cache = {'network_info': get_fake_cache(get_floating)}
+
+ if isinstance(instances, (list, obj_base.ObjectListBase)):
+ for instance in instances:
+ instance['info_cache'] = info_cache
+ else:
+ instances['info_cache'] = info_cache
+ return instances
+
+
+class CloudTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CloudTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ ec2utils.reset_cache()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.unit.fake_volume.API')
+ self.useFixture(fixtures.FakeLogger('boto'))
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.consoleauth = self.start_service('consoleauth')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.volume_api = volume.API()
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ self.volume_api.reset_fake_api(self.context)
+ super(CloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def fake_get_target(obj, iqn):
+ return 1
+
+ def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
+ pass
+
+ def _stub_instance_get_with_fixed_ips(self,
+ func_name, get_floating=True):
+ orig_func = getattr(self.cloud.compute_api, func_name)
+
+ def fake_get(*args, **kwargs):
+ return get_instances_with_cached_ips(orig_func, get_floating,
+ *args, **kwargs)
+ self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
+
+ def _create_key(self, name):
+ # NOTE(vish): create depends on pool, so just call helper directly
+ keypair_api = compute_api.KeypairAPI()
+ return keypair_api.create_key_pair(self.context, self.context.user_id,
+ name)
+
+ def test_describe_regions(self):
+ # Makes sure describe regions runs without raising an exception.
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 1)
+ self.flags(region_list=["one=test_host1", "two=test_host2"])
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 2)
+
+ def test_describe_addresses(self):
+ # Makes sure describe addresses runs without raising an exception.
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.flags(network_api_class='nova.network.api.API')
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_addresses_in_neutron(self):
+ # Makes sure describe addresses runs without raising an exception.
+ address = "10.10.10.10"
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_specific_address(self):
+ # Makes sure describe specific address works.
+ addresses = ["10.10.10.10", "10.10.10.11"]
+ for address in addresses:
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ result = self.cloud.describe_addresses(self.context)
+ self.assertEqual(len(result['addressesSet']), 2)
+ result = self.cloud.describe_addresses(self.context,
+ public_ip=['10.10.10.10'])
+ self.assertEqual(len(result['addressesSet']), 1)
+ for address in addresses:
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_allocate_address(self):
+ address = "10.10.10.10"
+ allocate = self.cloud.allocate_address
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.assertEqual(allocate(self.context)['publicIp'], address)
+ db.floating_ip_destroy(self.context, address)
+ self.assertRaises(exception.NoMoreFloatingIps,
+ allocate,
+ self.context)
+
+ def test_release_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova',
+ 'project_id': self.project_id})
+ result = self.cloud.release_address(self.context, address)
+ self.assertEqual(result.get('return', None), 'true')
+
+ def test_associate_disassociate_address(self):
+ # Verifies associate runs cleanly without raising an exception.
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ # TODO(jkoelker) Probably need to query for instance_type_id and
+ # make sure we get a valid one
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ nw_info = self.network.allocate_for_instance(self.context,
+ instance_id=inst['id'],
+ instance_uuid=inst['uuid'],
+ host=inst['host'],
+ vpn=None,
+ rxtx_factor=3,
+ project_id=project_id,
+ macs=None)
+
+ fixed_ips = nw_info.fixed_ips()
+ ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
+
+ self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
+ lambda *args: {'fixed_ips': ['10.0.0.1'],
+ 'fixed_ip6s': [],
+ 'floating_ips': []})
+ self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
+ lambda *args: 1)
+
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+
+ return
+
+ self.stubs.Set(base_network_api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
+ self.cloud.associate_address(self.context,
+ instance_id=ec2_id,
+ public_ip=address)
+ self.cloud.disassociate_address(self.context,
+ public_ip=address)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
+ inst['host'])
+ db.instance_destroy(self.context, inst['uuid'])
+ db.floating_ip_destroy(self.context, address)
+
+ def test_disassociate_auto_assigned_address(self):
+ """Verifies disassociating auto assigned floating IP
+ raises an exception
+ """
+ address = "10.10.10.10"
+
+ def fake_get(*args, **kwargs):
+ pass
+
+ def fake_disassociate_floating_ip(*args, **kwargs):
+ raise exception.CannotDisassociateAutoAssignedFloatingIP()
+
+ self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
+ lambda *args: 1)
+ self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+ self.stubs.Set(network_api.API, 'disassociate_floating_ip',
+ fake_disassociate_floating_ip)
+
+ self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+
+ def test_disassociate_unassociated_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.assertRaises(exception.InvalidAssociation,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_security_groups(self):
+ # Makes sure describe_security_groups works and filters results.
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[sec['name']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_describe_security_groups_all_tenants(self):
+ # Makes sure describe_security_groups works and filters results.
+ sec = db.security_group_create(self.context,
+ {'project_id': 'foobar',
+ 'name': 'test'})
+
+ def _check_name(result, i, expected):
+ self.assertEqual(result['securityGroupInfo'][i]['groupName'],
+ expected)
+
+ # include all tenants
+ filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
+ result = self.cloud.describe_security_groups(self.context,
+ filter=filter)
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ _check_name(result, 0, 'default')
+ _check_name(result, 1, sec['name'])
+
+ # exclude all tenants
+ filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
+ result = self.cloud.describe_security_groups(self.context,
+ filter=filter)
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ _check_name(result, 0, 'default')
+
+ # default all tenants
+ result = self.cloud.describe_security_groups(self.context)
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ _check_name(result, 0, 'default')
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_describe_security_groups_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[sec['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ default = db.security_group_get_by_name(self.context,
+ self.context.project_id,
+ 'default')
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[default['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ 'default')
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+ def test_security_group_quota_limit(self):
+ self.flags(quota_security_groups=10)
+ for i in range(1, CONF.quota_security_groups):
+ name = 'test name %i' % i
+ descript = 'test description %i' % i
+ create = self.cloud.create_security_group
+ create(self.context, name, descript)
+
+        # The next group should exceed the quota and fail
+ self.assertRaises(exception.SecurityGroupLimitExceeded,
+ create, self.context, 'foo', 'bar')
+
+ def test_delete_security_group_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, group_id=sec['id']))
+
+ def test_delete_security_group_with_bad_name(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, 'badname')
+
+ def test_delete_security_group_with_bad_group_id(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, group_id=999)
+
+ def test_delete_security_group_no_params(self):
+ delete = self.cloud.delete_security_group
+ self.assertRaises(exception.MissingParameter, delete, self.context)
+
+ def test_delete_security_group_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.delete_security_group, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.authorize_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'ip_ranges':
+ {'1': {'cidr_ip': u'0.0.0.0/0'},
+ '2': {'cidr_ip': u'10.10.10.10/32'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_security_group_fail_missing_source_group(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
+ '2': {'cidr_ip': u'10.10.10.10/32'}},
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertRaises(exception.SecurityGroupNotFound, authz,
+ self.context, group_name=sec['name'], **kwargs)
+
+ def test_authorize_security_group_ingress_ip_permissions_groups(self):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'somegroup1'})
+ sec = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'othergroup2'})
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'},
+ '2': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_describe_security_group_ingress_groups(self):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'name': 'test'
+ }
+ sec1 = db.security_group_create(self.context, kwargs)
+ sec2 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'somegroup1'})
+ sec3 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'othergroup2'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [
+ {'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}}},
+ {'ip_protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 80,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}}}]}
+ self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEqual(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ self.assertEqual(len(actual_rules), 4)
+ expected_rules = [{'fromPort': -1,
+ 'groups': [{'groupName': 'somegroup1',
+ 'userId': 'someuser'}],
+ 'ipProtocol': 'icmp',
+ 'ipRanges': [],
+ 'toPort': -1},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'tcp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'udp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 80,
+ 'groups': [{'groupName': u'othergroup2',
+ 'userId': u'someuser'}],
+ 'ipProtocol': u'tcp',
+ 'ipRanges': [],
+ 'toPort': 80}]
+ for rule in expected_rules:
+ self.assertIn(rule, actual_rules)
+
+ db.security_group_destroy(self.context, sec3['id'])
+ db.security_group_destroy(self.context, sec2['id'])
+ db.security_group_destroy(self.context, sec1['id'])
+
+ def test_revoke_security_group_ingress_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.revoke_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_revoke_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_revoke_security_group_ingress_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
+
+ def test_authorize_security_group_ingress_missing_protocol_params(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ 'test')
+
+ def test_authorize_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ **kwargs)
+
+ def test_authorize_security_group_ingress_already_exists(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ self.assertRaises(exception.SecurityGroupRuleExists, authz,
+ self.context, group_name=sec['name'], **kwargs)
+
+ def test_security_group_ingress_quota_limit(self):
+ self.flags(quota_security_group_rules=20)
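+        # The loop below adds 20 rules (ports 100-119), filling the rule
+        # quota; adding one more rule should then fail.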
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec_group = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ for i in range(100, 120):
+ kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec_group['id'], **kwargs)
+
+ kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
+ self.assertRaises(exception.SecurityGroupLimitExceeded, authz,
+ self.context, group_id=sec_group['id'], **kwargs)
+
+ def _test_authorize_security_group_no_ports_with_source_group(self, proto):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'description': '',
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context, kwargs)
+
+ authz = self.cloud.authorize_security_group_ingress
+ auth_kwargs = {'ip_protocol': proto,
+ 'groups': {'1': {'user_id': self.context.user_id,
+ 'group_name': u'test'}}}
+ self.assertTrue(authz(self.context, group_name=sec['name'],
+ **auth_kwargs))
+
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEqual(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ expected_rules = [{'groups': [{'groupName': 'test',
+ 'userId': self.context.user_id}],
+ 'ipProtocol': proto,
+ 'ipRanges': []}]
+ if proto == 'icmp':
+ expected_rules[0]['fromPort'] = -1
+ expected_rules[0]['toPort'] = -1
+ else:
+ expected_rules[0]['fromPort'] = 1
+ expected_rules[0]['toPort'] = 65535
+        self.assertEqual(expected_rules, actual_rules)
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def _test_authorize_security_group_no_ports_no_source_group(self, proto):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'description': '',
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context, kwargs)
+
+ authz = self.cloud.authorize_security_group_ingress
+ auth_kwargs = {'ip_protocol': proto}
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ group_name=sec['name'], **auth_kwargs)
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_authorize_security_group_no_ports_icmp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('icmp')
+ self._test_authorize_security_group_no_ports_no_source_group('icmp')
+
+ def test_authorize_security_group_no_ports_tcp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('tcp')
+ self._test_authorize_security_group_no_ports_no_source_group('tcp')
+
+ def test_authorize_security_group_no_ports_udp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('udp')
+ self._test_authorize_security_group_no_ports_no_source_group('udp')
+
+ def test_revoke_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertRaises(exception.MissingParameter, revoke,
+ self.context, **kwargs)
+
+ def test_delete_security_group_in_use_by_group(self):
+ self.cloud.create_security_group(self.context, 'testgrp1',
+ "test group 1")
+ self.cloud.create_security_group(self.context, 'testgrp2',
+ "test group 2")
+ kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
+ 'group_name': u'testgrp2'}},
+ }
+ self.cloud.authorize_security_group_ingress(self.context,
+ group_name='testgrp1', **kwargs)
+
+ group1 = db.security_group_get_by_name(self.context,
+ self.project_id, 'testgrp1')
+ get_rules = db.security_group_rule_get_by_security_group
+
+ self.assertTrue(get_rules(self.context, group1['id']))
+ self.cloud.delete_security_group(self.context, 'testgrp2')
+ self.assertFalse(get_rules(self.context, group1['id']))
+
+ def test_delete_security_group_in_use_by_instance(self):
+ # Ensure that a group can not be deleted if in use by an instance.
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ args = {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active'}
+ inst = db.instance_create(self.context, args)
+
+ args = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'name': 'testgrp',
+ 'description': 'Test group'}
+ group = db.security_group_create(self.context, args)
+
+ db.instance_add_security_group(self.context, inst['uuid'], group['id'])
+
+ self.assertRaises(exception.InvalidGroup,
+ self.cloud.delete_security_group,
+ self.context, 'testgrp')
+
+ db.instance_destroy(self.context, inst['uuid'])
+
+ self.cloud.delete_security_group(self.context, 'testgrp')
+
+ def test_describe_availability_zones(self):
+ # Makes sure describe_availability_zones works and filters results.
+ service1 = db.service_create(self.context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ service2 = db.service_create(self.context, {'host': 'host2_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ # Aggregate based zones
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'zone1'})
+ db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg2'}, {'availability_zone': 'zone2'})
+ db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
+ result = self.cloud.describe_availability_zones(self.context)
+ self.assertEqual(len(result['availabilityZoneInfo']), 3)
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ result = self.cloud.describe_availability_zones(admin_ctxt,
+ zone_name='verbose')
+ self.assertEqual(len(result['availabilityZoneInfo']), 18)
+ db.service_destroy(self.context, service1['id'])
+ db.service_destroy(self.context, service2['id'])
+
+ def test_describe_availability_zones_verbose(self):
+        # Makes sure describe_availability_zones verbose output works.
+ service1 = db.service_create(self.context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ service2 = db.service_create(self.context, {'host': 'host2_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'second_zone'})
+ db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ result = self.cloud.describe_availability_zones(admin_ctxt,
+ zone_name='verbose')
+
+ self.assertEqual(len(result['availabilityZoneInfo']), 17)
+ db.service_destroy(self.context, service1['id'])
+ db.service_destroy(self.context, service2['id'])
+
+ def assertEqualSorted(self, x, y):
+ self.assertEqual(sorted(x), sorted(y))
+
+ def test_describe_instances(self):
+ # Makes sure describe_instances works and filters results.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+
+ sys_meta['EC2_client_token'] = "client-token-1"
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'hostname': 'server-1234',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+
+ sys_meta['EC2_client_token'] = "client-token-2"
+ inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host2',
+ 'hostname': 'server-4321',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'topic': "compute"})
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'zone1'})
+ db.aggregate_host_add(self.context, agg['id'], 'host1')
+
+ comp2 = db.service_create(self.context, {'host': 'host2',
+ 'topic': "compute"})
+ agg2 = db.aggregate_create(self.context,
+ {'name': 'agg2'}, {'availability_zone': 'zone2'})
+ db.aggregate_host_add(self.context, agg2['id'], 'host2')
+
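+        # host1/host2 sit in different aggregate-based AZs so the filtered
+        # instance's placement/availabilityZone can be verified below.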
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 2)
+
+ # Now try filtering.
+ instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], instance_id)
+ self.assertEqual(instance['placement']['availabilityZone'], 'zone2')
+ self.assertEqual(instance['ipAddress'], '1.2.3.4')
+ self.assertEqual(instance['dnsName'], '1.2.3.4')
+ self.assertEqual(instance['tagSet'], [])
+ self.assertEqual(instance['privateDnsName'], 'server-4321')
+ self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
+ self.assertEqual(instance['dnsNameV6'],
+ 'fe80:b33f::a8bb:ccff:fedd:eeff')
+ self.assertEqual(instance['clientToken'], 'client-token-2')
+
+ # A filter with even one invalid id should cause an exception to be
+ # raised
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.describe_instances, self.context,
+ instance_id=[instance_id, '435679'])
+
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.instance_destroy(self.context, inst2['uuid'])
+ db.service_destroy(self.context, comp1['id'])
+ db.service_destroy(self.context, comp2['id'])
+
+ def test_describe_instances_all_invalid(self):
+        # Makes sure describe_instances raises InstanceNotFound for an
+        # invalid instance id.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ instance_id = ec2utils.id_to_ec2_inst_id('435679')
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.describe_instances, self.context,
+ instance_id=[instance_id])
+
+ def test_describe_instances_with_filters(self):
+ # Makes sure describe_instances works and filters results.
+ filters = {'filter': [{'name': 'test',
+ 'value': ['a', 'b']},
+ {'name': 'another_test',
+ 'value': 'a string'}]}
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': []})
+
+ def test_describe_instances_with_filters_tags(self):
+ # Makes sure describe_instances works and filters tag results.
+
+ # We need to stub network calls
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ # We need to stub out the MQ call - it won't succeed. We do want
+ # to check that the method is called, though
+ meta_changes = [None]
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ utc = iso8601.iso8601.Utc()
+
+ # Create some test images
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst1_kwargs = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1111',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
+ tzinfo=utc),
+ 'system_metadata': sys_meta
+ }
+
+ inst2_kwargs = {
+ 'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host2',
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1112',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
+ tzinfo=utc),
+ 'system_metadata': sys_meta
+ }
+
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+ ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+ inst2 = db.instance_create(self.context, inst2_kwargs)
+ ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+
+ # Create some tags
+ # We get one overlapping pair, one overlapping key, and a
+ # disparate pair
+ # inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
+ # inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
+
+ md = {'key': 'foo', 'value': 'bar'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
+ tag=[md])
+
+ md2 = {'key': 'baz', 'value': 'wibble'}
+ md3 = {'key': 'bax', 'value': 'wobble'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1],
+ tag=[md2, md3])
+
+ md4 = {'key': 'baz', 'value': 'quux'}
+ md5 = {'key': 'zog', 'value': 'bobble'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id2],
+ tag=[md4, md5])
+ # We should be able to search by:
+
+ inst1_ret = {
+ 'groupSet': None,
+ 'instancesSet': [{'amiLaunchIndex': None,
+ 'dnsName': '1.2.3.4',
+ 'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
+ 'imageId': 'ami-00000001',
+ 'instanceId': 'i-00000001',
+ 'instanceState': {'code': 16,
+ 'name': 'running'},
+ 'instanceType': u'm1.medium',
+ 'ipAddress': '1.2.3.4',
+ 'keyName': 'None (None, host1)',
+ 'launchTime':
+ datetime.datetime(2012, 5, 1, 1, 1, 1,
+ tzinfo=utc),
+ 'placement': {
+ 'availabilityZone': 'nova'},
+ 'privateDnsName': u'server-1111',
+ 'privateIpAddress': '192.168.0.3',
+ 'productCodesSet': None,
+ 'rootDeviceName': '/dev/sda1',
+ 'rootDeviceType': 'instance-store',
+ 'tagSet': [{'key': u'foo',
+ 'value': u'bar'},
+ {'key': u'baz',
+ 'value': u'wibble'},
+ {'key': u'bax',
+ 'value': u'wobble'}]}],
+ 'ownerId': None,
+ 'reservationId': u'a'}
+
+ inst2_ret = {
+ 'groupSet': None,
+ 'instancesSet': [{'amiLaunchIndex': None,
+ 'dnsName': '1.2.3.4',
+ 'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
+ 'imageId': 'ami-00000001',
+ 'instanceId': 'i-00000002',
+ 'instanceState': {'code': 16,
+ 'name': 'running'},
+ 'instanceType': u'm1.medium',
+ 'ipAddress': '1.2.3.4',
+ 'keyName': u'None (None, host2)',
+ 'launchTime':
+ datetime.datetime(2012, 5, 1, 1, 1, 2,
+ tzinfo=utc),
+ 'placement': {
+ 'availabilityZone': 'nova'},
+ 'privateDnsName': u'server-1112',
+ 'privateIpAddress': '192.168.0.3',
+ 'productCodesSet': None,
+ 'rootDeviceName': '/dev/sda1',
+ 'rootDeviceType': 'instance-store',
+ 'tagSet': [{'key': u'foo',
+ 'value': u'bar'},
+ {'key': u'baz',
+ 'value': u'quux'},
+ {'key': u'zog',
+ 'value': u'bobble'}]}],
+ 'ownerId': None,
+ 'reservationId': u'b'}
+
+ # No filter
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Key search
+ # Both should have tags with key 'foo' and value 'bar'
+ filters = {'filter': [{'name': 'tag:foo',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Both should have tags with key 'foo'
+ filters = {'filter': [{'name': 'tag-key',
+ 'value': ['foo']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Value search
+ # Only inst2 should have tags with key 'baz' and value 'quux'
+ filters = {'filter': [{'name': 'tag:baz',
+ 'value': ['quux']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # Only inst2 should have tags with value 'quux'
+ filters = {'filter': [{'name': 'tag-value',
+ 'value': ['quux']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # Multiple values
+ # Both should have tags with key 'baz' and values in the set
+ # ['quux', 'wibble']
+ filters = {'filter': [{'name': 'tag:baz',
+ 'value': ['quux', 'wibble']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Both should have tags with key 'baz' or tags with value 'bar'
+ filters = {'filter': [{'name': 'tag-key',
+ 'value': ['baz']},
+ {'name': 'tag-value',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Confirm deletion of tags
+ # Check for format 'tag:'
+ self.cloud.delete_tags(self.context, resource_id=[ec2_id1], tag=[md])
+ filters = {'filter': [{'name': 'tag:foo',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # Check for format 'tag-'
+ filters = {'filter': [{'name': 'tag-key',
+ 'value': ['foo']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+ filters = {'filter': [{'name': 'tag-value',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # destroy the test instances
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.instance_destroy(self.context, inst2['uuid'])
+
+ def test_describe_instances_sorting(self):
+ # Makes sure describe_instances works and is sorted as expected.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst_base = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta,
+ }
+
+ utc = iso8601.iso8601.Utc()
+
+ inst1_kwargs = {}
+ inst1_kwargs.update(inst_base)
+ inst1_kwargs['host'] = 'host1'
+ inst1_kwargs['hostname'] = 'server-1111'
+ inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
+ tzinfo=utc)
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+
+ inst2_kwargs = {}
+ inst2_kwargs.update(inst_base)
+ inst2_kwargs['host'] = 'host2'
+ inst2_kwargs['hostname'] = 'server-2222'
+ inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
+ tzinfo=utc)
+ inst2 = db.instance_create(self.context, inst2_kwargs)
+
+ inst3_kwargs = {}
+ inst3_kwargs.update(inst_base)
+ inst3_kwargs['host'] = 'host3'
+ inst3_kwargs['hostname'] = 'server-3333'
+ inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
+ tzinfo=utc)
+ inst3 = db.instance_create(self.context, inst3_kwargs)
+
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'topic': "compute"})
+
+ comp2 = db.service_create(self.context, {'host': 'host2',
+ 'topic': "compute"})
+
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]['instancesSet']
+ self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
+ self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
+ self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
+
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.instance_destroy(self.context, inst2['uuid'])
+ db.instance_destroy(self.context, inst3['uuid'])
+ db.service_destroy(self.context, comp1['id'])
+ db.service_destroy(self.context, comp2['id'])
+
+ def test_describe_instance_state(self):
+ # Makes sure describe_instances for instanceState works.
+
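+        # Helper: create an instance with the given power/vm state and check
+        # the instanceState code/name returned by describe_instances.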
+ def test_instance_state(expected_code, expected_name,
+ power_state_, vm_state_, values=None):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ values = values or {}
+ values.update({'image_ref': image_uuid, 'instance_type_id': 1,
+ 'power_state': power_state_, 'vm_state': vm_state_,
+ 'system_metadata': sys_meta})
+ inst = db.instance_create(self.context, values)
+
+ instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[instance_id])
+ result = result['reservationSet'][0]
+ result = result['instancesSet'][0]['instanceState']
+
+ name = result['name']
+ code = result['code']
+ self.assertEqual(code, expected_code)
+ self.assertEqual(name, expected_name)
+
+ db.instance_destroy(self.context, inst['uuid'])
+
+ test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
+ power_state.RUNNING, vm_states.ACTIVE)
+ test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
+ power_state.NOSTATE, vm_states.STOPPED,
+ {'shutdown_terminate': False})
+
+ def test_describe_instances_no_ipv6(self):
+ # Makes sure describe_instances w/ no ipv6 works.
+ self.flags(use_ipv6=False)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'hostname': 'server-1234',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'topic': "compute"})
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ instance = result['instancesSet'][0]
+ instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+ self.assertEqual(instance['instanceId'], instance_id)
+ self.assertEqual(instance['ipAddress'], '1.2.3.4')
+ self.assertEqual(instance['dnsName'], '1.2.3.4')
+ self.assertEqual(instance['privateDnsName'], 'server-1234')
+ self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
+ self.assertNotIn('dnsNameV6', instance)
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.service_destroy(self.context, comp1['id'])
+
+ def test_describe_instances_deleted(self):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ args1 = {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ inst1 = db.instance_create(self.context, args1)
+ args2 = {'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ inst2 = db.instance_create(self.context, args2)
+ db.instance_destroy(self.context, inst1['uuid'])
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(len(result['reservationSet']), 1)
+ result1 = result['reservationSet'][0]['instancesSet']
+ self.assertEqual(result1[0]['instanceId'],
+ ec2utils.id_to_ec2_inst_id(inst2['uuid']))
+
+ def test_describe_instances_with_image_deleted(self):
+ image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ args1 = {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ db.instance_create(self.context, args1)
+ args2 = {'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ db.instance_create(self.context, args2)
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(len(result['reservationSet']), 2)
+
+ def test_describe_instances_dnsName_set(self):
+        # Verifies dnsName isn't set when no floating IP is associated.
+ self._stub_instance_get_with_fixed_ips('get_all', get_floating=False)
+ self._stub_instance_get_with_fixed_ips('get', get_floating=False)
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'hostname': 'server-1234',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ instance = result['instancesSet'][0]
+ self.assertIsNone(instance['dnsName'])
+
+ def test_describe_instances_booting_from_a_volume(self):
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst = objects.Instance(self.context)
+ inst.reservation_id = 'a'
+ inst.image_ref = ''
+ inst.root_device_name = '/dev/sdh'
+ inst.instance_type_id = 1
+ inst.vm_state = vm_states.ACTIVE
+ inst.host = 'host1'
+ inst.system_metadata = sys_meta
+ inst.create()
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ instance = result['instancesSet'][0]
+ self.assertIsNone(instance['imageId'])
+
+ def test_describe_images(self):
+ describe_images = self.cloud.describe_images
+
+ def fake_detail(meh, context, **kwargs):
+ return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}}]
+
+ def fake_show_none(meh, context, id):
+ raise exception.ImageNotFound(image_id='bad_image_id')
+
+ def fake_detail_none(self, context, **kwargs):
+ return []
+
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ # list all
+ result1 = describe_images(self.context)
+ result1 = result1['imagesSet'][0]
+ self.assertEqual(result1['imageId'], 'ami-00000001')
+ # provided a valid image_id
+ result2 = describe_images(self.context, ['ami-00000001'])
+ self.assertEqual(1, len(result2['imagesSet']))
+ # provide more than 1 valid image_id
+ result3 = describe_images(self.context, ['ami-00000001',
+ 'ami-00000002'])
+ self.assertEqual(2, len(result3['imagesSet']))
+ # provide a non-existing image_id
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
+ self.assertRaises(exception.ImageNotFound, describe_images,
+ self.context, ['ami-fake'])
+
+ def assertDictListUnorderedMatch(self, L1, L2, key):
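+        """Assert L1 and L2 have the same length and that any dicts sharing
+        the same value for `key` match exactly, regardless of ordering.
+        """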
+ self.assertEqual(len(L1), len(L2))
+ for d1 in L1:
+ self.assertIn(key, d1)
+ for d2 in L2:
+ self.assertIn(key, d2)
+ if d1[key] == d2[key]:
+ self.assertThat(d1, matchers.DictMatches(d2))
+
+ def _setUpImageSet(self, create_volumes_and_snapshots=False):
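+        """Stub the fake image service with two images carrying block
+        device mappings; optionally create the volumes and snapshots they
+        reference and return their ids as (volumes, snapshots).
+        """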
+ self.flags(max_local_block_devices=-1)
+ mappings1 = [
+ {'device': '/dev/sda1', 'virtual': 'root'},
+
+ {'device': 'sdb0', 'virtual': 'ephemeral0'},
+ {'device': 'sdb1', 'virtual': 'ephemeral1'},
+ {'device': 'sdb2', 'virtual': 'ephemeral2'},
+ {'device': 'sdb3', 'virtual': 'ephemeral3'},
+ {'device': 'sdb4', 'virtual': 'ephemeral4'},
+
+ {'device': 'sdc0', 'virtual': 'swap'},
+ {'device': 'sdc1', 'virtual': 'swap'},
+ {'device': 'sdc2', 'virtual': 'swap'},
+ {'device': 'sdc3', 'virtual': 'swap'},
+ {'device': 'sdc4', 'virtual': 'swap'}]
+ block_device_mapping1 = [
+ {'device_name': '/dev/sdb1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
+ {'device_name': '/dev/sdb2',
+ 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
+ {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
+ {'device_name': '/dev/sdb4', 'no_device': True},
+
+ {'device_name': '/dev/sdc1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
+ {'device_name': '/dev/sdc2',
+ 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
+ {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
+ {'device_name': '/dev/sdc4', 'no_device': True}]
+ image1 = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available',
+ 'mappings': mappings1,
+ 'block_device_mapping': block_device_mapping1,
+ }
+ }
+
+ mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
+ block_device_mapping2 = [{'device_name': '/dev/sdb1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7',
+ 'volume_id': None}]
+ image2 = {
+ 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'type': 'machine',
+ 'root_device_name': '/dev/sdb1',
+ 'mappings': mappings2,
+ 'block_device_mapping': block_device_mapping2}}
+
+ def fake_show(meh, context, image_id, **kwargs):
+ _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
+ for i in _images:
+ if str(i['id']) == str(image_id):
+ return i
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def fake_detail(meh, context, **kwargs):
+ return [copy.deepcopy(image1), copy.deepcopy(image2)]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+ volumes = []
+ snapshots = []
+ if create_volumes_and_snapshots:
+ for bdm in block_device_mapping1:
+ if 'volume_id' in bdm:
+ vol = self._volume_create(bdm['volume_id'])
+ volumes.append(vol['id'])
+ if 'snapshot_id' in bdm:
+ snap = self._snapshot_create(bdm['snapshot_id'])
+ snapshots.append(snap['id'])
+ return (volumes, snapshots)
+
+ def _assertImageSet(self, result, root_device_type, root_device_name):
+ self.assertEqual(1, len(result['imagesSet']))
+ result = result['imagesSet'][0]
+ self.assertIn('rootDeviceType', result)
+ self.assertEqual(result['rootDeviceType'], root_device_type)
+ self.assertIn('rootDeviceName', result)
+ self.assertEqual(result['rootDeviceName'], root_device_name)
+ self.assertIn('blockDeviceMapping', result)
+
+ return result
+
+ _expected_root_device_name1 = '/dev/sda1'
+    # NOTE(yamahata): noDevice doesn't make sense when returning a mapping.
+    #                 It only makes sense when the user is overriding an
+    #                 existing mapping.
+ _expected_bdms1 = [
+ {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
+ 'snap-00000001'}},
+ {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
+ 'vol-00000001'}},
+ {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
+ # {'deviceName': '/dev/sdb4', 'noDevice': True},
+
+ {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
+ 'snap-00000002'}},
+ {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
+ 'vol-00000002'}},
+ {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
+ # {'deviceName': '/dev/sdc4', 'noDevice': True}
+ ]
+
+ _expected_root_device_name2 = '/dev/sdb1'
+ _expected_bdms2 = [{'deviceName': '/dev/sdb1',
+ 'ebs': {'snapshotId': 'snap-00000003'}}]
+
+ # NOTE(yamahata):
+ # InstanceBlockDeviceMappingItemType
+ # rootDeviceType
+ # rootDeviceName
+ # blockDeviceMapping
+ # deviceName
+ # virtualName
+ # ebs
+ # snapshotId
+ # volumeSize
+ # deleteOnTermination
+ # noDevice
+ def test_describe_image_mapping(self):
+ # test for rootDeviceName and blockDeviceMapping.
+ describe_images = self.cloud.describe_images
+ self._setUpImageSet()
+
+ result = describe_images(self.context, ['ami-00000001'])
+ result = self._assertImageSet(result, 'instance-store',
+ self._expected_root_device_name1)
+
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms1, 'deviceName')
+
+ result = describe_images(self.context, ['ami-00000002'])
+ result = self._assertImageSet(result, 'ebs',
+ self._expected_root_device_name2)
+
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms2, 'deviceName')
+
+ def test_describe_image_attribute(self):
+ describe_image_attribute = self.cloud.describe_image_attribute
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'is_public': True}
+
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'launchPermission')
+ self.assertEqual([{'group': 'all'}], result['launchPermission'])
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'kernel')
+ self.assertEqual('aki-00000001', result['kernel']['value'])
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'ramdisk')
+ self.assertEqual('ari-00000001', result['ramdisk']['value'])
+
+ def test_describe_image_attribute_root_device_name(self):
+ describe_image_attribute = self.cloud.describe_image_attribute
+ self._setUpImageSet()
+
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'rootDeviceName')
+ self.assertEqual(result['rootDeviceName'],
+ self._expected_root_device_name1)
+ result = describe_image_attribute(self.context, 'ami-00000002',
+ 'rootDeviceName')
+ self.assertEqual(result['rootDeviceName'],
+ self._expected_root_device_name2)
+
+ def test_describe_image_attribute_block_device_mapping(self):
+ describe_image_attribute = self.cloud.describe_image_attribute
+ self._setUpImageSet()
+
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'blockDeviceMapping')
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms1, 'deviceName')
+ result = describe_image_attribute(self.context, 'ami-00000002',
+ 'blockDeviceMapping')
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms2, 'deviceName')
+
+ def test_modify_image_attribute(self):
+ modify_image_attribute = self.cloud.modify_image_attribute
+
+ fake_metadata = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'is_public': False}
+
+ def fake_show(meh, context, id, **kwargs):
+ return copy.deepcopy(fake_metadata)
+
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ def fake_update(meh, context, image_id, metadata, data=None):
+ self.assertEqual(metadata['properties']['kernel_id'],
+ fake_metadata['properties']['kernel_id'])
+ self.assertEqual(metadata['properties']['ramdisk_id'],
+ fake_metadata['properties']['ramdisk_id'])
+ self.assertTrue(metadata['is_public'])
+ image = copy.deepcopy(fake_metadata)
+ image.update(metadata)
+ return image
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ self.stubs.Set(fake._FakeImageService, 'update', fake_update)
+ result = modify_image_attribute(self.context, 'ami-00000001',
+ 'launchPermission', 'add',
+ user_group=['all'])
+ self.assertTrue(result['is_public'])
+
+ def test_register_image(self):
+ register_image = self.cloud.register_image
+
+ def fake_create(*args, **kwargs):
+ # NOTE(vish): We are mocking s3 so make sure we have converted
+ # to ids instead of uuids.
+ return {'id': 1,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {'kernel_id': 1,
+ 'ramdisk_id': 1,
+ 'type': 'machine'
+ },
+ 'is_public': False
+ }
+
+ self.stubs.Set(s3.S3ImageService, 'create', fake_create)
+ image_location = 'fake_bucket/fake.img.manifest.xml'
+ result = register_image(self.context, image_location)
+ self.assertEqual(result['imageId'], 'ami-00000001')
+
+ def test_register_image_empty(self):
+ register_image = self.cloud.register_image
+ self.assertRaises(exception.MissingParameter, register_image,
+ self.context, image_location=None)
+
+ def test_register_image_name(self):
+ register_image = self.cloud.register_image
+
+ def fake_create(_self, context, metadata, data=None):
+ self.assertEqual(metadata['name'], self.expected_name)
+ metadata['id'] = 1
+ metadata['container_format'] = 'ami'
+ metadata['is_public'] = False
+ return metadata
+
+ self.stubs.Set(s3.S3ImageService, 'create', fake_create)
+ self.expected_name = 'fake_bucket/fake.img.manifest.xml'
+ register_image(self.context,
+ image_location=self.expected_name,
+ name=None)
+ self.expected_name = 'an image name'
+ register_image(self.context,
+ image_location='some_location',
+ name=self.expected_name)
+
+ def test_format_image(self):
+ image = {
+ 'id': 1,
+ 'container_format': 'ami',
+ 'name': 'name',
+ 'owner': 'someone',
+ 'properties': {
+ 'image_location': 'location',
+ 'kernel_id': 1,
+ 'ramdisk_id': 1,
+ 'type': 'machine'},
+ 'is_public': False}
+ expected = {'name': 'name',
+ 'imageOwnerId': 'someone',
+ 'isPublic': False,
+ 'imageId': 'ami-00000001',
+ 'imageState': None,
+ 'rootDeviceType': 'instance-store',
+ 'architecture': None,
+ 'imageLocation': 'location',
+ 'kernelId': 'aki-00000001',
+ 'ramdiskId': 'ari-00000001',
+ 'rootDeviceName': '/dev/sda1',
+ 'imageType': 'machine',
+ 'description': None}
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+ image['properties']['image_location'] = None
+ expected['imageLocation'] = 'None (name)'
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+ image['name'] = None
+ image['properties']['image_location'] = 'location'
+ expected['imageLocation'] = 'location'
+ expected['name'] = 'location'
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+
+ def test_deregister_image(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ # valid image
+ result = deregister_image(self.context, 'ami-00000001')
+ self.assertTrue(result)
+ # invalid image
+ self.stubs.UnsetAll()
+
+ def fake_detail_empty(self, context, **kwargs):
+ return []
+
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
+ self.assertRaises(exception.ImageNotFound, deregister_image,
+ self.context, 'ami-bad001')
+
+ def test_deregister_image_wrong_container_type(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ self.assertRaises(exception.NotFound, deregister_image, self.context,
+ 'aki-00000001')
+
+ def _run_instance(self, **kwargs):
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def test_get_password_data(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_flavor,
+ max_count=1)
+ self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
+ output = self.cloud.get_password_data(context=self.context,
+ instance_id=[instance_id])
+ self.assertEqual(output['passwordData'], 'fakepass')
+ self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_console_output(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_flavor,
+ max_count=1)
+ output = self.cloud.get_console_output(context=self.context,
+ instance_id=[instance_id])
+ self.assertEqual(base64.b64decode(output['output']),
+ 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
+ # TODO(soren): We need this until we can stop polling in the rpc code
+ # for unit tests.
+ self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_key_generation(self):
+ result, private_key = self._create_key('test')
+
+ expected = db.key_pair_get(self.context,
+ self.context.user_id,
+ 'test')['public_key']
+
+        (fd, fname) = tempfile.mkstemp()
+        os.write(fd, private_key)
+        os.close(fd)
+
+ public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
+
+ os.unlink(fname)
+
+ # assert key fields are equal
+ self.assertEqual(''.join(public_key.split("\n")[2:-2]),
+ expected.split(" ")[1].strip())
+
+ def test_describe_key_pairs(self):
+ self._create_key('test1')
+ self._create_key('test2')
+ result = self.cloud.describe_key_pairs(self.context)
+ keys = result["keySet"]
+ self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
+ self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
+
+ def test_describe_bad_key_pairs(self):
+ self.assertRaises(exception.KeypairNotFound,
+ self.cloud.describe_key_pairs, self.context,
+ key_name=['DoesNotExist'])
+
+ def test_import_key_pair(self):
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+ with open(pubkey_path + '/dummy.pub') as f:
+ dummypub = f.readline().rstrip()
+ with open(pubkey_path + '/dummy.fingerprint') as f:
+ dummyfprint = f.readline().rstrip()
+ key_name = 'testimportkey'
+ public_key_material = base64.b64encode(dummypub)
+ result = self.cloud.import_key_pair(self.context,
+ key_name,
+ public_key_material)
+ self.assertEqual(result['keyName'], key_name)
+ self.assertEqual(result['keyFingerprint'], dummyfprint)
+ keydata = db.key_pair_get(self.context,
+ self.context.user_id,
+ key_name)
+ self.assertEqual(dummypub, keydata['public_key'])
+ self.assertEqual(dummyfprint, keydata['fingerprint'])
+
+ def test_import_key_pair_quota_limit(self):
+ self.flags(quota_key_pairs=0)
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+        with open(pubkey_path + '/dummy.pub') as f:
+            dummypub = f.readline().rstrip()
+        with open(pubkey_path + '/dummy.fingerprint') as f:
+            f.readline().rstrip()
+ key_name = 'testimportkey'
+ public_key_material = base64.b64encode(dummypub)
+ self.assertRaises(exception.KeypairLimitExceeded,
+ self.cloud.import_key_pair, self.context, key_name,
+ public_key_material)
+
+ def test_create_key_pair(self):
+ good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
+ bad_names = ('', 'a' * 256, '*', '/')
+
+ for key_name in good_names:
+ result = self.cloud.create_key_pair(self.context,
+ key_name)
+ self.assertEqual(result['keyName'], key_name)
+
+ for key_name in bad_names:
+ self.assertRaises(exception.InvalidKeypair,
+ self.cloud.create_key_pair,
+ self.context,
+ key_name)
+
+ def test_create_key_pair_quota_limit(self):
+ self.flags(quota_key_pairs=10)
+ for i in range(0, 10):
+ key_name = 'key_%i' % i
+ result = self.cloud.create_key_pair(self.context,
+ key_name)
+ self.assertEqual(result['keyName'], key_name)
+
+        # the 11th key pair should exceed the quota and fail
+ self.assertRaises(exception.KeypairLimitExceeded,
+ self.cloud.create_key_pair,
+ self.context,
+ 'foo')
+
+ def test_delete_key_pair(self):
+ self._create_key('test')
+ self.cloud.delete_key_pair(self.context, 'test')
+
+ def test_run_instances(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['imageId'], 'ami-00000001')
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+ self.assertEqual(instance['instanceState']['name'], 'running')
+ self.assertEqual(instance['instanceType'], 'm1.small')
+
+ def test_run_instances_invalid_maxcount(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 0}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_invalid_mincount(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'min_count': 0}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_invalid_count(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'min_count': 2}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_availability_zone(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'placement': {'availability_zone': 'fake'},
+ }
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ def fake_format(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
+
+ def fake_create(*args, **kwargs):
+ self.assertEqual(kwargs['availability_zone'], 'fake')
+ return ({'id': 'fake-instance'}, 'fake-res-id')
+
+ self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
+
+        # NOTE(vish): the assert for this call is in the fake_create method.
+ run_instances(self.context, **kwargs)
+
+ def test_empty_reservation_id_from_token(self):
+ client_token = 'client-token-1'
+
+ def fake_get_all_system_metadata(context, search_filts):
+ reference = [{'key': ['EC2_client_token']},
+ {'value': ['client-token-1']}]
+ self.assertEqual(search_filts, reference)
+ return []
+
+ self.stubs.Set(self.cloud.compute_api, 'get_all_system_metadata',
+ fake_get_all_system_metadata)
+ resv_id = self.cloud._resv_id_from_token(self.context, client_token)
+ self.assertIsNone(resv_id)
+
+ def test_run_instances_idempotent(self):
+ # Ensure subsequent run_instances calls with same client token
+ # are idempotent and that ones with different client_token are not
+
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ kwargs['client_token'] = 'client-token-1'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+
+ kwargs['client_token'] = 'client-token-2'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000002')
+
+ kwargs['client_token'] = 'client-token-2'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000002')
+
+ kwargs['client_token'] = 'client-token-1'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+
+ kwargs['client_token'] = 'client-token-3'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000003')
+
+ # make sure terminated instances lose their client tokens
+ self.cloud.stop_instances(self.context,
+ instance_id=[instance['instanceId']])
+ self.cloud.terminate_instances(self.context,
+ instance_id=[instance['instanceId']])
+
+ kwargs['client_token'] = 'client-token-3'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000004')
+
+ def test_run_instances_image_state_none(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_no_state(self, context, id):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}, 'container_format': 'ami'}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
+ self.assertRaises(exception.ImageNotActive, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_state_invalid(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_decrypt(self, context, id):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine', 'image_state': 'decrypting'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
+ self.assertRaises(exception.ImageNotActive, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_status_active(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_stat_active(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+
+ def fake_id_to_glance_id(context, id):
+ return 'cedef40a-ed67-4d10-800e-17455edce175'
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
+ self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
+
+ result = run_instances(self.context, **kwargs)
+ self.assertEqual(len(result['instancesSet']), 1)
+
+ def _restart_compute_service(self, periodic_interval_max=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval_max:
+ self.compute = self.start_service(
+ 'compute', periodic_interval_max=periodic_interval_max)
+ else:
+ self.compute = self.start_service('compute')
+
+ def test_stop_start_instance(self):
+ # Makes sure stop/start instance works.
+        # Make periodic tasks run frequently to avoid waiting up to 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
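+        # EC2 instance state codes used in the expected results below:
+        # 0 pending, 16 running, 32 shutting-down, 64 stopping, 80 stopped.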
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 0,
+ 'name': 'pending'}}]}
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ def test_start_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 0,
+ 'name': 'pending'}}]}
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_start_instances_policy_failed(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+ rules = {
+ "compute:start":
+ common_policy.parse_rule("project_id:non_fake"),
+ }
+ policy.set_rules(rules)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+ self.assertIn("compute:start", exc.format_message())
+ self._restart_compute_service()
+
+ def test_stop_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_stop_instances_policy_failed(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+ rules = {
+ "compute:stop":
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.stop_instances,
+ self.context, [instance_id])
+ self.assertIn("compute:stop", exc.format_message())
+ self._restart_compute_service()
+
+ def test_terminate_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_terminate_instances_invalid_instance_id(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ self._run_instance(**kwargs)
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.terminate_instances,
+ self.context, ['i-2'])
+ self._restart_compute_service()
+
+ def test_terminate_instances_disable_terminate(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
+ ec2utils.ec2_id_to_id(instance_id))
+ db.instance_update(self.context, internal_uuid,
+ {'disable_terminate': True})
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 16,
+ 'name': 'running'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ db.instance_update(self.context, internal_uuid,
+ {'disable_terminate': False})
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_terminate_instances_two_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ inst1 = self._run_instance(**kwargs)
+ inst2 = self._run_instance(**kwargs)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [inst1])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}},
+ {'instanceId': 'i-00000002',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [inst1, inst2])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_reboot_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ result = self.cloud.reboot_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ def _volume_create(self, volume_id=None):
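+        """Create a test volume through the stubbed volume API, optionally
+        forcing a specific volume id.
+        """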
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached'}
+ if volume_id:
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _snapshot_create(self, snapshot_id=None):
+ kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
+ 'status': "available",
+ 'volume_size': 1}
+ if snapshot_id:
+ kwargs['snap_id'] = snapshot_id
+ return self.volume_api.create_snapshot_with_kwargs(self.context,
+ **kwargs)
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ return result['snapshotId']
+
+ def _do_test_create_image(self, no_reboot):
+ """Make sure that CreateImage works."""
+        # Make periodic tasks run frequently to avoid waiting up to 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ def fake_show(meh, context, id, **kwargs):
+ bdm = [dict(snapshot_id=snapshots[0],
+ volume_size=1,
+ device_name='sda1',
+ delete_on_termination=False)]
+ props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
+ ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ root_device_name='/dev/sda1',
+ block_device_mapping=bdm)
+ return dict(id=id,
+ properties=props,
+ container_format='ami',
+ status='active',
+ is_public=True)
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': volumes[0],
+ 'snapshot_id': snapshots[0],
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'sda1',
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'connection_info': '{"foo":"bar"}',
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
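+        # Record which power operations the fake virt driver receives so the
+        # test can verify whether the instance was rebooted.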
+ virt_driver = {}
+
+ def fake_power_on(self, context, instance, network_info,
+ block_device_info):
+ virt_driver['powered_on'] = True
+
+ self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
+
+ def fake_power_off(self, instance,
+ shutdown_timeout, shutdown_attempts):
+ virt_driver['powered_off'] = True
+
+ self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
+
+ result = self.cloud.create_image(self.context, ec2_instance_id,
+ no_reboot=no_reboot)
+ ec2_ids = [result['imageId']]
+ created_image = self.cloud.describe_images(self.context,
+ ec2_ids)['imagesSet'][0]
+
+ self.assertIn('blockDeviceMapping', created_image)
+ bdm = created_image['blockDeviceMapping'][0]
+ self.assertEqual(bdm.get('deviceName'), 'sda1')
+ self.assertIn('ebs', bdm)
+ self.assertEqual(bdm['ebs'].get('snapshotId'),
+ ec2utils.id_to_ec2_snap_id(snapshots[0]))
+ self.assertEqual(created_image.get('kernelId'), 'aki-00000001')
+ self.assertEqual(created_image.get('ramdiskId'), 'ari-00000002')
+ self.assertEqual(created_image.get('rootDeviceType'), 'ebs')
+ self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
+ self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ self._restart_compute_service()
+
+ def test_create_image_no_reboot(self):
+ # Make sure that CreateImage works.
+ self._do_test_create_image(True)
+
+ def test_create_image_with_reboot(self):
+ # Make sure that CreateImage works.
+ self._do_test_create_image(False)
+
+ def test_create_image_instance_store(self):
+ """Ensure CreateImage fails as expected for an instance-store-backed
+ instance
+ """
+        # Make periodic tasks run frequently to avoid waiting up to 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': volumes[0],
+ 'snapshot_id': snapshots[0],
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ self.assertRaises(exception.InvalidParameterValue,
+ self.cloud.create_image,
+ self.context,
+ ec2_instance_id,
+ no_reboot=True)
+
+ @staticmethod
+ def _fake_bdm_get(ctxt, id, use_slave=False):
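+        """Return fake block device mappings covering volume-backed,
+        snapshot-backed, no-device, ephemeral and swap entries.
+        """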
+ blockdms = [{'volume_id': 87654321,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'},
+ {'volume_id': None,
+ 'snapshot_id': 98765432,
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'no_device': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdi'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': True,
+ 'source_type': 'blank',
+ 'destination_type': None,
+ 'delete_on_termination': None,
+ 'device_name': None},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdd'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sd3'},
+ ]
+
+ extra = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 0,
+ 'device_type': None,
+ 'disk_bus': None,
+ 'instance_uuid': '',
+ 'image_id': None,
+ 'volume_size': None,
+ 'connection_info': None,
+ 'boot_index': None,
+ 'guest_format': None,
+ }
+
+ for bdm in blockdms:
+ bdm.update(extra)
+
+ return blockdms
+
+ def test_describe_instance_attribute(self):
+ # Make sure that describe_instance_attribute works.
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ self._fake_bdm_get)
+
+ def fake_get(ctxt, instance_id, want_objects=False):
+ self.assertTrue(want_objects)
+ inst_type = flavors.get_default_flavor()
+ inst_type['name'] = 'fake_type'
+ sys_meta = flavors.save_flavor_info({}, inst_type)
+ secgroups = objects.SecurityGroupList()
+ secgroups.objects.append(
+ objects.SecurityGroup(name='fake0'))
+ secgroups.objects.append(
+ objects.SecurityGroup(name='fake1'))
+ instance = objects.Instance(ctxt)
+ instance.id = 0
+ instance.uuid = 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
+ instance.root_device_name = '/dev/sdh'
+ instance.security_groups = secgroups
+ instance.vm_state = vm_states.STOPPED
+ instance.kernel_id = 'cedef40a-ed67-4d10-800e-17455edce175'
+ instance.ramdisk_id = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ instance.user_data = 'fake-user data'
+ instance.shutdown_terminate = False
+ instance.disable_terminate = False
+ instance.system_metadata = sys_meta
+ return instance
+ self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+
+ def fake_ec2_instance_get_by_id(ctxt, int_id):
+ if int_id == 305419896:
+ fake_map = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 305419896,
+ 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
+ }
+ return fake_map
+ raise exception.InstanceNotFound(instance_id=int_id)
+ self.stubs.Set(db, 'ec2_instance_get_by_id',
+ fake_ec2_instance_get_by_id)
+
+ get_attribute = functools.partial(
+ self.cloud.describe_instance_attribute,
+ self.context, 'i-12345678')
+
+ bdm = get_attribute('blockDeviceMapping')
+ bdm['blockDeviceMapping'].sort()
+
+ expected_bdm = {'instance_id': 'i-12345678',
+ 'rootDeviceType': 'ebs',
+ 'blockDeviceMapping': [
+ {'deviceName': '/dev/sdh',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': True,
+ 'volumeId': 'vol-05397fb1',
+ 'attachTime': '13:56:24'}}]}
+ expected_bdm['blockDeviceMapping'].sort()
+ self.assertEqual(bdm, expected_bdm)
+ groupSet = get_attribute('groupSet')
+ groupSet['groupSet'].sort()
+ expected_groupSet = {'instance_id': 'i-12345678',
+ 'groupSet': [{'groupId': 'fake0'},
+ {'groupId': 'fake1'}]}
+ expected_groupSet['groupSet'].sort()
+ self.assertEqual(groupSet, expected_groupSet)
+ self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
+ {'instance_id': 'i-12345678',
+ 'instanceInitiatedShutdownBehavior': 'stop'})
+ self.assertEqual(get_attribute('disableApiTermination'),
+ {'instance_id': 'i-12345678',
+ 'disableApiTermination': False})
+ self.assertEqual(get_attribute('instanceType'),
+ {'instance_id': 'i-12345678',
+ 'instanceType': 'fake_type'})
+ self.assertEqual(get_attribute('kernel'),
+ {'instance_id': 'i-12345678',
+ 'kernel': 'aki-00000001'})
+ self.assertEqual(get_attribute('ramdisk'),
+ {'instance_id': 'i-12345678',
+ 'ramdisk': 'ari-00000002'})
+ self.assertEqual(get_attribute('rootDeviceName'),
+ {'instance_id': 'i-12345678',
+ 'rootDeviceName': '/dev/sdh'})
+ # NOTE(yamahata): this isn't supported
+ # get_attribute('sourceDestCheck')
+ self.assertEqual(get_attribute('userData'),
+ {'instance_id': 'i-12345678',
+ 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
+
+ def test_instance_initiated_shutdown_behavior(self):
+ def test_dia_iisb(expected_result, **kwargs):
+ """test describe_instance_attribute
+ attribute instance_initiated_shutdown_behavior
+ """
+ kwargs.update({'instance_type': CONF.default_flavor,
+ 'max_count': 1})
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.describe_instance_attribute(self.context,
+ instance_id, 'instanceInitiatedShutdownBehavior')
+ self.assertEqual(result['instanceInitiatedShutdownBehavior'],
+ expected_result)
+
+ expected = {'instancesSet': [
+ {'instanceId': instance_id,
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context,
+ [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ test_dia_iisb('stop', image_id='ami-1')
+
+ block_device_mapping = [{'device_name': '/dev/vdb',
+ 'virtual_name': 'ephemeral0'}]
+ test_dia_iisb('stop', image_id='ami-2',
+ block_device_mapping=block_device_mapping)
+
+ def fake_show(self, context, id_, **kwargs):
+ LOG.debug("id_ %s", id_)
+
+ prop = {}
+ if id_ == 'ami-3':
+ pass
+ elif id_ == 'ami-4':
+ prop = {'mappings': [{'device': 'sdb0',
+ 'virtual': 'ephemeral0'}]}
+ elif id_ == 'ami-5':
+ prop = {'block_device_mapping':
+ [{'device_name': '/dev/sdb0',
+ 'virtual_name': 'ephemeral0'}]}
+ elif id_ == 'ami-6':
+ prop = {'mappings': [{'device': 'sdb0',
+ 'virtual': 'ephemeral0'}],
+ 'block_device_mapping':
+ [{'device_name': '/dev/sdb0',
+ 'virtual_name': 'ephemeral0'}]}
+
+ prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}
+ prop_base.update(prop)
+
+ return {
+ 'id': id_,
+ 'name': 'fake_name',
+ 'properties': prop_base,
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ # NOTE(yamahata): create ami-3 ... ami-7
+        # ami-1 and ami-2 are already created by setUp()
+ for i in range(3, 8):
+ db.s3_image_create(self.context, 'ami-%d' % i)
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ test_dia_iisb('stop', image_id='ami-3')
+ test_dia_iisb('stop', image_id='ami-4')
+ test_dia_iisb('stop', image_id='ami-5')
+ test_dia_iisb('stop', image_id='ami-6')
+ test_dia_iisb('terminate', image_id='ami-7',
+ instance_initiated_shutdown_behavior='terminate')
+
+ def test_create_delete_tags(self):
+
+ # We need to stub network calls
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ # We need to stub out the MQ call - it won't succeed. We do want
+ # to check that the method is called, though
+ meta_changes = [None]
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ # Create a test image
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst1_kwargs = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1111',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
+ }
+
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+ ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+ # Create some tags
+ md = {'key': 'foo', 'value': 'bar'}
+ md_result = {'foo': 'bar'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id],
+ tag=[md])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, md_result)
+ self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
+
+ # Delete them
+ self.cloud.delete_tags(self.context, resource_id=[ec2_id],
+ tag=[{'key': 'foo', 'value': 'bar'}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, {})
+ self.assertEqual(meta_changes, [{'foo': ['-']}])
+
+ def test_describe_tags(self):
+ # We need to stub network calls
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ # We need to stub out the MQ call - it won't succeed. We do want
+ # to check that the method is called, though
+ meta_changes = [None]
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ # Create some test images
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst1_kwargs = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1111',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
+ }
+
+ inst2_kwargs = {
+ 'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1112',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
+ }
+
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+ ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+ inst2 = db.instance_create(self.context, inst2_kwargs)
+ ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+
+ # Create some tags
+        # Both instances share one tag; each also has a key/value pair of
+        # its own:
+        #   inst1 : {'foo': 'bar', 'bax': 'wibble'}
+        #   inst2 : {'foo': 'bar', 'baz': 'quux'}
+
+ md = {'key': 'foo', 'value': 'bar'}
+ md_result = {'foo': 'bar'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
+ tag=[md])
+
+ self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, md_result)
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst2)
+ self.assertEqual(metadata, md_result)
+
+ md2 = {'key': 'baz', 'value': 'quux'}
+ md2_result = {'baz': 'quux'}
+ md2_result.update(md_result)
+ self.cloud.create_tags(self.context, resource_id=[ec2_id2],
+ tag=[md2])
+
+ self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst2)
+ self.assertEqual(metadata, md2_result)
+
+ md3 = {'key': 'bax', 'value': 'wibble'}
+ md3_result = {'bax': 'wibble'}
+ md3_result.update(md_result)
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1],
+ tag=[md3])
+
+ self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, md3_result)
+
+ inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
+ 'resource_type': 'instance', 'value': u'bar'}
+ inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
+ 'resource_type': 'instance', 'value': u'wibble'}
+ inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
+ 'resource_type': 'instance', 'value': u'bar'}
+ inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
+ 'resource_type': 'instance', 'value': u'quux'}
+
+ # We should be able to search by:
+ # No filter
+ tags = self.cloud.describe_tags(self.context)['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
+ inst2_key_baz, inst1_key_bax])
+
+ # Resource ID
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'resource-id',
+ 'value': [ec2_id1]}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
+
+ # Resource Type
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'resource-type',
+ 'value': ['instance']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
+ inst2_key_baz, inst1_key_bax])
+
+        # Key, either bare or with wildcards ('?' matches one character,
+        # '*' matches any sequence)
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['foo']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz']}])['tagSet']
+ self.assertEqualSorted(tags, [inst2_key_baz])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['ba?']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['b*']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
+
+ # Value, either bare or with wildcards
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'value',
+ 'value': ['bar']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'value',
+ 'value': ['wi*']}])['tagSet']
+ self.assertEqual(tags, [inst1_key_bax])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'value',
+ 'value': ['quu?']}])['tagSet']
+ self.assertEqual(tags, [inst2_key_baz])
+
+ # Multiple values
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz', 'bax']}])['tagSet']
+ self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
+
+ # Multiple filters (AND): no match
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz']},
+ {'name': 'value',
+ 'value': ['wibble']}])['tagSet']
+ self.assertEqual(tags, [])
+
+ # Multiple filters (AND): match
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz']},
+ {'name': 'value',
+ 'value': ['quux']}])['tagSet']
+ self.assertEqualSorted(tags, [inst2_key_baz])
+
+        # And we should fail on unsupported resource types
+ self.assertRaises(exception.InvalidParameterValue,
+ self.cloud.describe_tags,
+ self.context,
+ filter=[{'name': 'resource-type',
+ 'value': ['instance', 'volume']}])
+
+ def test_resource_type_from_id(self):
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'i-12345'),
+ 'instance')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'r-12345'),
+ 'reservation')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'vol-12345'),
+ 'volume')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'snap-12345'),
+ 'snapshot')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'ami-12345'),
+ 'image')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'ari-12345'),
+ 'image')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'aki-12345'),
+ 'image')
+ self.assertIsNone(
+ ec2utils.resource_type_from_id(self.context, 'x-12345'))
+
+ @mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
+ side_effect=lambda
+ ec2_volume_id: uuidutils.generate_uuid())
+    def test_detach_volume_unattached_error(self, mock_ec2_vol_id_to_uuid):
+ # Validates that VolumeUnattached is raised if the volume doesn't
+ # have an instance_uuid value.
+ ec2_volume_id = 'vol-987654321'
+
+ with mock.patch.object(self.cloud.volume_api, 'get',
+ side_effect=lambda context, volume_id:
+ {'id': volume_id}) as mock_get:
+ self.assertRaises(exception.VolumeUnattached,
+ self.cloud.detach_volume,
+ self.context,
+ ec2_volume_id)
+ mock_get.assert_called_once_with(self.context, mock.ANY)
+ mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
+
+
+class CloudTestCaseNeutronProxy(test.NoDBTestCase):
+ def setUp(self):
+ super(CloudTestCaseNeutronProxy, self).setUp()
+ cfg.CONF.set_override('security_group_api', 'neutron')
+ self.cloud = cloud.CloudController()
+ self.original_client = neutronv2.get_client
+ neutronv2.get_client = test_neutron.get_client
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ def tearDown(self):
+ neutronv2.get_client = self.original_client
+ test_neutron.get_client()._reset()
+ super(CloudTestCaseNeutronProxy, self).tearDown()
+
+ def test_describe_security_groups(self):
+ # Makes sure describe_security_groups works and filters results.
+ group_name = 'test'
+ description = 'test'
+ self.cloud.create_security_group(self.context, group_name,
+ description)
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[group_name])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(result['securityGroupInfo'][0]['groupName'],
+ group_name)
+ self.cloud.delete_security_group(self.context, group_name)
+
+ def test_describe_security_groups_by_id(self):
+ group_name = 'test'
+ description = 'test'
+ self.cloud.create_security_group(self.context, group_name,
+ description)
+ neutron = test_neutron.get_client()
+ # Get id from neutron since cloud.create_security_group
+ # does not expose it.
+ search_opts = {'name': group_name}
+ groups = neutron.list_security_groups(
+ **search_opts)['security_groups']
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[groups[0]['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ group_name)
+ self.cloud.delete_security_group(self.context, group_name)
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+
+class FormatMappingTestCase(test.TestCase):
+
+ def test_format_mapping(self):
+ properties = {'block_device_mapping':
+ [{'guest_format': None, 'boot_index': 0,
+ 'no_device': None, 'volume_id': None,
+ 'volume_size': None, 'disk_bus': 'virtio',
+ 'image_id': None, 'source_type': 'snapshot',
+ 'device_type': 'disk',
+ 'snapshot_id': '993b31ac-452e-4fed-b745-7718385f1811',
+ 'destination_type': 'volume',
+ 'delete_on_termination': None},
+ {'guest_format': None, 'boot_index': None,
+ 'no_device': None, 'volume_id': None,
+ 'volume_size': None, 'disk_bus': None,
+ 'image_id': None, 'source_type': 'snapshot',
+ 'device_type': None,
+ 'snapshot_id': 'b409a2de-1c79-46bf-aa7e-ebdb4bf427ef',
+ 'destination_type': 'volume',
+ 'delete_on_termination': None}],
+ 'checksum': '50bdc35edb03a38d91b1b071afb20a3c',
+ 'min_ram': '0', 'disk_format': 'qcow2',
+ 'image_name': 'cirros-0.3.0-x86_64-disk', 'bdm_v2': 'True',
+ 'image_id': '4fce9db9-d89e-4eea-8d20-e2bae15292c1',
+ 'root_device_name': '/dev/vda', 'container_format': 'bare',
+ 'min_disk': '0', 'size': '9761280'}
+ result = {'description': None,
+ 'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
+ 'isPublic': False, 'imageId': 'ami-00000002',
+ 'imageState': 'available', 'architecture': None,
+ 'imageLocation': 'None (xb)',
+ 'rootDeviceType': 'instance-store',
+ 'rootDeviceName': '/dev/vda',
+ 'imageType': 'machine', 'name': 'xb'}
+ cloud._format_mappings(properties, result)
+ expected = {'architecture': None,
+ 'blockDeviceMapping':
+ [{'ebs': {'snapshotId': 'snap-00000002'}}],
+ 'description': None,
+ 'imageId': 'ami-00000002',
+ 'imageLocation': 'None (xb)',
+ 'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
+ 'imageState': 'available',
+ 'imageType': 'machine',
+ 'isPublic': False,
+ 'name': 'xb',
+ 'rootDeviceName': '/dev/vda',
+ 'rootDeviceType': 'instance-store'}
+ self.assertEqual(expected, result)
diff --git a/nova/tests/unit/api/ec2/test_ec2_validate.py b/nova/tests/unit/api/ec2/test_ec2_validate.py
new file mode 100644
index 0000000000..53ae8c110e
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_ec2_validate.py
@@ -0,0 +1,277 @@
+# Copyright 2012 Cloudscaling, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class EC2ValidateTestCase(test.TestCase):
+ def setUp(self):
+ super(EC2ValidateTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+        self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.image_service = fake.FakeImageService()
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
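+        # Malformed ids should raise the Invalid*IDMalformed exceptions,
+        # while well-formed ids that do not exist should raise the
+        # corresponding NotFound errors; build (id, expected exception)
+        # pairs for both cases.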
+ self.EC2_MALFORMED_IDS = ['foobar', '', 123]
+        self.EC2_VALID_IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']
+
+ self.ec2_id_exception_map = [(x,
+ exception.InvalidInstanceIDMalformed)
+ for x in self.EC2_MALFORMED_IDS]
+        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
+                                          for x in self.EC2_VALID_IDS])
+ self.volume_id_exception_map = [(x,
+ exception.InvalidVolumeIDMalformed)
+ for x in self.EC2_MALFORMED_IDS]
+        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
+                                             for x in self.EC2_VALID_IDS])
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(self, context, None)
+ image['name'] = kwargs.get('name')
+ return [image]
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
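+        # CastAsCall makes RPC casts behave like calls so that failures in
+        # the stubbed services surface synchronously in these tests.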
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ super(EC2ValidateTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ # EC2_API tests (InvalidInstanceID.Malformed)
+ def test_console_output(self):
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.get_console_output,
+ context=self.context,
+ instance_id=[ec2_id])
+
+ def test_describe_instance_attribute(self):
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.describe_instance_attribute,
+ context=self.context,
+ instance_id=ec2_id,
+ attribute='kernel')
+
+ def test_instance_lifecycle(self):
+ lifecycle = [self.cloud.terminate_instances,
+ self.cloud.reboot_instances,
+ self.cloud.stop_instances,
+ self.cloud.start_instances,
+ ]
+ for cmd in lifecycle:
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ cmd,
+ context=self.context,
+ instance_id=[ec2_id])
+
+ def test_create_image(self):
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.create_image,
+ context=self.context,
+ instance_id=ec2_id)
+
+ def test_create_snapshot(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.create_snapshot,
+ context=self.context,
+ volume_id=ec2_id)
+
+ def test_describe_volumes(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.describe_volumes,
+ context=self.context,
+ volume_id=[ec2_id])
+
+ def test_delete_volume(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.delete_volume,
+ context=self.context,
+ volume_id=ec2_id)
+
+ def test_detach_volume(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.detach_volume,
+ context=self.context,
+ volume_id=ec2_id)
+
+
+class EC2TimestampValidationTestCase(test.NoDBTestCase):
+ """Test case for EC2 request timestamp validation."""
+
+ def test_validate_ec2_timestamp_valid(self):
+ params = {'Timestamp': '2011-04-22T11:29:49Z'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_timestamp_old_format(self):
+ params = {'Timestamp': '2011-04-22T11:29:49'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_timestamp_not_set(self):
+ params = {}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_timestamp_ms_time_regex(self):
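+        # Judging from the assertions below, the regex accepts one to six
+        # fractional-second digits and requires a trailing 'Z'.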
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
+ self.assertIsNotNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
+ self.assertIsNotNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
+ self.assertIsNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
+ self.assertIsNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
+ self.assertIsNone(result)
+
+ def test_validate_ec2_timestamp_aws_sdk_format(self):
+ params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_timestamp_invalid_format(self):
+ params = {'Timestamp': '2011-04-22T11:29:49.000P'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_timestamp_advanced_time(self):
+
+        # EC2 request with a Timestamp set in the near future
+ timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
+ params = {'Timestamp': timeutils.strtime(timestamp,
+ "%Y-%m-%dT%H:%M:%SZ")}
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_timestamp_advanced_time_expired(self):
+ timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
+ params = {'Timestamp': timeutils.strtime(timestamp,
+ "%Y-%m-%dT%H:%M:%SZ")}
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_req_timestamp_not_expired(self):
+ params = {'Timestamp': timeutils.isotime()}
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_req_timestamp_expired(self):
+ params = {'Timestamp': '2011-04-22T12:00:00Z'}
+ compare = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertTrue(compare)
+
+ def test_validate_ec2_req_expired(self):
+ params = {'Expires': timeutils.isotime()}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_req_not_expired(self):
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
+ params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+
+ def test_validate_Expires_timestamp_invalid_format(self):
+
+ # EC2 request with invalid Expires
+ params = {'Expires': '2011-04-22T11:29:49'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_req_timestamp_Expires(self):
+
+ # EC2 request with both Timestamp and Expires
+ params = {'Timestamp': '2011-04-22T11:29:49Z',
+ 'Expires': timeutils.isotime()}
+ self.assertRaises(exception.InvalidRequest,
+ ec2utils.is_ec2_timestamp_expired,
+ params)
diff --git a/nova/tests/unit/api/ec2/test_ec2utils.py b/nova/tests/unit/api/ec2/test_ec2utils.py
new file mode 100644
index 0000000000..9dceb7de12
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_ec2utils.py
@@ -0,0 +1,61 @@
+# Copyright 2014 - Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.ec2 import ec2utils
+from nova import context
+from nova import objects
+from nova import test
+
+
+class EC2UtilsTestCase(test.TestCase):
+ def setUp(self):
+ self.ctxt = context.get_admin_context()
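+        # ec2utils memoizes id <-> uuid lookups; reset the cache so
+        # mappings created by one test do not leak into the next.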
+ ec2utils.reset_cache()
+ super(EC2UtilsTestCase, self).setUp()
+
+ def test_get_int_id_from_snapshot_uuid(self):
+ smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
+ smap.create()
+ smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
+ 'fake-uuid')
+ self.assertEqual(smap.id, smap_id)
+
+ def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
+ smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
+ 'fake-uuid')
+ smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
+ self.assertEqual('fake-uuid', smap.uuid)
+
+ def test_get_snapshot_uuid_from_int_id(self):
+ smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
+ smap.create()
+ smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
+ self.assertEqual(smap.uuid, smap_uuid)
+
+ def test_id_to_glance_id(self):
+ s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
+ s3imap.create()
+ uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
+ self.assertEqual(uuid, s3imap.uuid)
+
+ def test_glance_id_to_id(self):
+ s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
+ s3imap.create()
+ s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
+ self.assertEqual(s3imap_id, s3imap.id)
+
+ def test_glance_id_to_id_creates_mapping(self):
+ s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
+ s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
+ self.assertEqual('fake-uuid', s3imap.uuid)
diff --git a/nova/tests/unit/api/ec2/test_error_response.py b/nova/tests/unit/api/ec2/test_error_response.py
new file mode 100644
index 0000000000..925d6723ed
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_error_response.py
@@ -0,0 +1,132 @@
+#
+# Copyright 2013 - Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Unit tests for EC2 error responses.
+"""
+
+from lxml import etree
+
+from nova.api import ec2
+from nova import context
+from nova import test
+from nova import wsgi
+
+
+class TestClientExceptionEC2(Exception):
+ ec2_code = 'ClientException.Test'
+ message = "Test Client Exception."
+ code = 400
+
+
+class TestServerExceptionEC2(Exception):
+ ec2_code = 'ServerException.Test'
+ message = "Test Server Exception."
+ code = 500
+
+
+class Ec2ErrorResponseTestCase(test.NoDBTestCase):
+ """Test EC2 error responses.
+
+ This deals mostly with api/ec2/__init__.py code, especially
+ the ec2_error_ex helper.
+ """
+ def setUp(self):
+ super(Ec2ErrorResponseTestCase, self).setUp()
+ self.context = context.RequestContext('test_user_id',
+ 'test_project_id')
+ self.req = wsgi.Request.blank('/test')
+ self.req.environ['nova.context'] = self.context
+
+ def _validate_ec2_error(self, response, http_status, ec2_code, msg=None,
+ unknown_msg=False):
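+        # Walk the expected EC2 error structure: a Response root holding
+        # an Errors element (with a single Error containing Code and
+        # Message) plus a RequestID element.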
+ self.assertEqual(response.status_code, http_status,
+ 'Expected HTTP status %s' % http_status)
+ root_e = etree.XML(response.body)
+ self.assertEqual(root_e.tag, 'Response',
+ "Top element must be Response.")
+ errors_e = root_e.find('Errors')
+ self.assertEqual(len(errors_e), 1,
+ "Expected exactly one Error element in Errors.")
+ error_e = errors_e[0]
+ self.assertEqual(error_e.tag, 'Error',
+ "Expected Error element.")
+ # Code
+ code_e = error_e.find('Code')
+ self.assertIsNotNone(code_e, "Code element must be present.")
+ self.assertEqual(code_e.text, ec2_code)
+ # Message
+ if msg or unknown_msg:
+ message_e = error_e.find('Message')
+            self.assertIsNotNone(message_e, "Message element must be present.")
+ if msg:
+ self.assertEqual(message_e.text, msg)
+ elif unknown_msg:
+ self.assertEqual(message_e.text, "Unknown error occurred.",
+ "Error message should be anonymous.")
+ # RequestID
+ requestid_e = root_e.find('RequestID')
+ self.assertIsNotNone(requestid_e,
+ 'RequestID element should be present.')
+ self.assertEqual(requestid_e.text, self.context.request_id)
+
+ def test_exception_ec2_4xx(self):
+ """Test response to EC2 exception with code = 400."""
+ msg = "Test client failure."
+ err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req)
+ self._validate_ec2_error(err, TestClientExceptionEC2.code,
+ TestClientExceptionEC2.ec2_code, msg)
+
+ def test_exception_ec2_5xx(self):
+ """Test response to EC2 exception with code = 500.
+
+ Expected errors are treated as client ones even with 5xx code.
+ """
+ msg = "Test client failure with 5xx error code."
+ err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req)
+ self._validate_ec2_error(err, 400, TestServerExceptionEC2.ec2_code,
+ msg)
+
+ def test_unexpected_exception_ec2_4xx(self):
+ """Test response to unexpected EC2 exception with code = 400."""
+ msg = "Test unexpected client failure."
+ err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req,
+ unexpected=True)
+ self._validate_ec2_error(err, TestClientExceptionEC2.code,
+ TestClientExceptionEC2.ec2_code, msg)
+
+ def test_unexpected_exception_ec2_5xx(self):
+ """Test response to unexpected EC2 exception with code = 500.
+
+ Server exception messages (with code >= 500 or without code) should
+ be filtered as they might contain sensitive information.
+ """
+ msg = "Test server failure."
+ err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req,
+ unexpected=True)
+ self._validate_ec2_error(err, TestServerExceptionEC2.code,
+ TestServerExceptionEC2.ec2_code,
+ unknown_msg=True)
+
+ def test_unexpected_exception_builtin(self):
+ """Test response to builtin unexpected exception.
+
+ Server exception messages (with code >= 500 or without code) should
+ be filtered as they might contain sensitive information.
+ """
+ msg = "Test server failure."
+ err = ec2.ec2_error_ex(RuntimeError(msg), self.req, unexpected=True)
+ self._validate_ec2_error(err, 500, 'RuntimeError', unknown_msg=True)
diff --git a/nova/tests/unit/api/ec2/test_faults.py b/nova/tests/unit/api/ec2/test_faults.py
new file mode 100644
index 0000000000..ae71be9bbf
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_faults.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import webob
+
+from nova.api.ec2 import faults
+from nova import test
+from nova import wsgi
+
+
+class TestFaults(test.NoDBTestCase):
+ """Tests covering ec2 Fault class."""
+
+ def test_fault_exception(self):
+        # Ensure the wrapped exception is preserved on faults.
+ fault = faults.Fault(webob.exc.HTTPBadRequest(
+ explanation='test'))
+ self.assertIsInstance(fault.wrapped_exc, webob.exc.HTTPBadRequest)
+
+ def test_fault_exception_status_int(self):
+ # Ensure the status_int is set correctly on faults.
+ fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
+ self.assertEqual(fault.wrapped_exc.status_int, 404)
+
+ def test_fault_call(self):
+ # Ensure proper EC2 response on faults.
+ message = 'test message'
+ ex = webob.exc.HTTPNotFound(explanation=message)
+ fault = faults.Fault(ex)
+ req = wsgi.Request.blank('/test')
+ req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
+ self.mox.StubOutWithMock(faults, 'ec2_error_response')
+ faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
+ message=message, status=ex.status_int)
+ self.mox.ReplayAll()
+ fault(req)
diff --git a/nova/tests/unit/api/ec2/test_middleware.py b/nova/tests/unit/api/ec2/test_middleware.py
new file mode 100644
index 0000000000..3eb9c703da
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_middleware.py
@@ -0,0 +1,225 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet.green import httplib
+from lxml import etree
+import mox
+from oslo.config import cfg
+from oslo.utils import timeutils
+import webob
+import webob.dec
+import webob.exc
+
+from nova.api import ec2
+from nova import context
+from nova import exception
+from nova import test
+from nova import wsgi
+
+CONF = cfg.CONF
+
+
+@webob.dec.wsgify
+def conditional_forbid(req):
+ """Helper wsgi app returns 403 if param 'die' is 1."""
+ if 'die' in req.params and req.params['die'] == '1':
+ raise webob.exc.HTTPForbidden()
+ return 'OK'
+
+
+class LockoutTestCase(test.NoDBTestCase):
+ """Test case for the Lockout middleware."""
+ def setUp(self): # pylint: disable=C0103
+ super(LockoutTestCase, self).setUp()
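+        # Freeze time so the lockout window can be advanced
+        # deterministically with timeutils.advance_time_seconds().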
+ timeutils.set_time_override()
+ self.lockout = ec2.Lockout(conditional_forbid)
+
+ def tearDown(self): # pylint: disable=C0103
+ timeutils.clear_time_override()
+ super(LockoutTestCase, self).tearDown()
+
+ def _send_bad_attempts(self, access_key, num_attempts=1):
+ """Fail x."""
+ for i in xrange(num_attempts):
+ req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
+ self.assertEqual(req.get_response(self.lockout).status_int, 403)
+
+ def _is_locked_out(self, access_key):
+ """Sends a test request to see if key is locked out."""
+ req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
+ return (req.get_response(self.lockout).status_int == 403)
+
+ def test_lockout(self):
+ self._send_bad_attempts('test', CONF.lockout_attempts)
+ self.assertTrue(self._is_locked_out('test'))
+
+ def test_timeout(self):
+ self._send_bad_attempts('test', CONF.lockout_attempts)
+ self.assertTrue(self._is_locked_out('test'))
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
+ self.assertFalse(self._is_locked_out('test'))
+
+ def test_multiple_keys(self):
+ self._send_bad_attempts('test1', CONF.lockout_attempts)
+ self.assertTrue(self._is_locked_out('test1'))
+ self.assertFalse(self._is_locked_out('test2'))
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
+ self.assertFalse(self._is_locked_out('test1'))
+ self.assertFalse(self._is_locked_out('test2'))
+
+ def test_window_timeout(self):
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
+ self.assertFalse(self._is_locked_out('test'))
+ timeutils.advance_time_seconds(CONF.lockout_window * 60)
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
+ self.assertFalse(self._is_locked_out('test'))
+
+
+class ExecutorTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(ExecutorTestCase, self).setUp()
+ self.executor = ec2.Executor()
+
+ def _execute(self, invoke):
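+        # Build a minimal WSGI request whose environ carries an
+        # 'ec2.request' object; the Executor invokes it and renders any
+        # raised nova exception as an EC2 error XML document, which the
+        # helpers below parse.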
+ class Fake(object):
+ pass
+ fake_ec2_request = Fake()
+ fake_ec2_request.invoke = invoke
+
+ fake_wsgi_request = Fake()
+
+ fake_wsgi_request.environ = {
+ 'nova.context': context.get_admin_context(),
+ 'ec2.request': fake_ec2_request,
+ }
+ return self.executor(fake_wsgi_request)
+
+ def _extract_message(self, result):
+ tree = etree.fromstring(result.body)
+ return tree.findall('./Errors')[0].find('Error/Message').text
+
+ def _extract_code(self, result):
+ tree = etree.fromstring(result.body)
+ return tree.findall('./Errors')[0].find('Error/Code').text
+
+ def test_instance_not_found(self):
+ def not_found(context):
+ raise exception.InstanceNotFound(instance_id=5)
+ result = self._execute(not_found)
+ self.assertIn('i-00000005', self._extract_message(result))
+ self.assertEqual('InvalidInstanceID.NotFound',
+ self._extract_code(result))
+
+ def test_instance_not_found_none(self):
+ def not_found(context):
+ raise exception.InstanceNotFound(instance_id=None)
+
+ # NOTE(mikal): we want no exception to be raised here, which was what
+ # was happening in bug/1080406
+ result = self._execute(not_found)
+ self.assertIn('None', self._extract_message(result))
+ self.assertEqual('InvalidInstanceID.NotFound',
+ self._extract_code(result))
+
+ def test_snapshot_not_found(self):
+ def not_found(context):
+ raise exception.SnapshotNotFound(snapshot_id=5)
+ result = self._execute(not_found)
+ self.assertIn('snap-00000005', self._extract_message(result))
+ self.assertEqual('InvalidSnapshot.NotFound',
+ self._extract_code(result))
+
+ def test_volume_not_found(self):
+ def not_found(context):
+ raise exception.VolumeNotFound(volume_id=5)
+ result = self._execute(not_found)
+ self.assertIn('vol-00000005', self._extract_message(result))
+ self.assertEqual('InvalidVolume.NotFound', self._extract_code(result))
+
+
+class FakeResponse(object):
+ reason = "Test Reason"
+
+ def __init__(self, status=400):
+ self.status = status
+
+ def read(self):
+ return '{}'
+
+
+class KeystoneAuthTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(KeystoneAuthTestCase, self).setUp()
+ self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
+
+ def _validate_ec2_error(self, response, http_status, ec2_code):
+ self.assertEqual(response.status_code, http_status,
+ 'Expected HTTP status %s' % http_status)
+ root_e = etree.XML(response.body)
+ self.assertEqual(root_e.tag, 'Response',
+ "Top element must be Response.")
+ errors_e = root_e.find('Errors')
+ error_e = errors_e[0]
+ code_e = error_e.find('Code')
+ self.assertIsNotNone(code_e, "Code element must be present.")
+ self.assertEqual(code_e.text, ec2_code)
+
+ def test_no_signature(self):
+ req = wsgi.Request.blank('/test')
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
+
+ def test_no_key_id(self):
+ req = wsgi.Request.blank('/test')
+ req.GET['Signature'] = 'test-signature'
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
+
+ def test_communication_failure(self):
+ req = wsgi.Request.blank('/test')
+ req.GET['Signature'] = 'test-signature'
+ req.GET['AWSAccessKeyId'] = 'test-key-id'
+
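+        # Stub the HTTP call to Keystone so it returns a 400; the
+        # middleware should report this as an EC2 AuthFailure.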
+ conn = httplib.HTTPConnection('/mock')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
+ conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
+ headers=mox.IgnoreArg())
+ resp = FakeResponse()
+ conn.getresponse().AndReturn(resp)
+ self.mox.ReplayAll()
+
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
+
+ def test_no_result_data(self):
+ req = wsgi.Request.blank('/test')
+ req.GET['Signature'] = 'test-signature'
+ req.GET['AWSAccessKeyId'] = 'test-key-id'
+
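+        # Keystone replies 200 but with an empty JSON body, so the
+        # middleware finds no token data and must still reject the request.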
+ conn = httplib.HTTPConnection('/mock')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'close')
+ conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
+ headers=mox.IgnoreArg())
+ resp = FakeResponse(200)
+ conn.getresponse().AndReturn(resp)
+ conn.close()
+ self.mox.ReplayAll()
+
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')