-rw-r--r--  HACKING.rst | 2
-rw-r--r--  doc/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json | 6
-rw-r--r--  doc/v3/api_samples/os-security-groups/security-group-add-post-req.json | 5
-rw-r--r--  doc/v3/api_samples/os-security-groups/security-group-post-req.json | 6
-rw-r--r--  doc/v3/api_samples/os-security-groups/security-group-remove-post-req.json | 5
-rw-r--r--  doc/v3/api_samples/os-security-groups/security-groups-create-resp.json | 9
-rw-r--r--  doc/v3/api_samples/os-security-groups/security-groups-get-resp.json | 9
-rw-r--r--  doc/v3/api_samples/os-security-groups/security-groups-list-get-resp.json | 11
-rw-r--r--  doc/v3/api_samples/os-security-groups/server-get-resp.json | 12
-rw-r--r--  doc/v3/api_samples/os-security-groups/server-post-req.json | 2
-rw-r--r--  doc/v3/api_samples/os-security-groups/server-post-resp.json | 10
-rw-r--r--  doc/v3/api_samples/os-security-groups/server-security-groups-list-resp.json | 11
-rw-r--r--  doc/v3/api_samples/os-security-groups/servers-detail-resp.json | 12
-rw-r--r--  etc/nova/api-paste.ini | 2
-rw-r--r--  etc/nova/policy.json | 2
-rw-r--r--  nova/api/auth.py | 2
-rw-r--r--  nova/api/compute_req_id.py | 2
-rw-r--r--  nova/api/ec2/__init__.py | 8
-rw-r--r--  nova/api/metadata/handler.py | 6
-rw-r--r--  nova/api/openstack/compute/contrib/agents.py | 6
-rw-r--r--  nova/api/openstack/compute/contrib/cloudpipe.py | 6
-rw-r--r--  nova/api/openstack/compute/contrib/cloudpipe_update.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/security_group_default_rules.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/security_groups.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/simple_tenant_usage.py | 3
-rw-r--r--  nova/api/openstack/compute/plugins/__init__.py | 2
-rw-r--r--  nova/api/openstack/compute/plugins/v3/baremetal_nodes.py | 173
-rw-r--r--  nova/api/openstack/compute/plugins/v3/block_device_mapping.py | 4
-rw-r--r--  nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py | 4
-rw-r--r--  nova/api/openstack/compute/plugins/v3/cloudpipe.py | 33
-rw-r--r--  nova/api/openstack/compute/plugins/v3/security_groups.py | 279
-rw-r--r--  nova/api/openstack/compute/plugins/v3/servers.py | 2
-rw-r--r--  nova/api/openstack/compute/plugins/v3/simple_tenant_usage.py | 3
-rw-r--r--  nova/api/openstack/compute/schemas/v3/cloudpipe.py | 39
-rw-r--r--  nova/api/openstack/compute/servers.py | 9
-rw-r--r--  nova/api/openstack/extensions.py | 2
-rw-r--r--  nova/api/openstack/wsgi.py | 4
-rw-r--r--  nova/api/validation/parameter_types.py | 6
-rw-r--r--  nova/compute/api.py | 2
-rw-r--r--  nova/compute/hvtype.py | 3
-rw-r--r--  nova/compute/manager.py | 22
-rw-r--r--  nova/compute/resource_tracker.py | 2
-rw-r--r--  nova/conductor/rpcapi.py | 15
-rw-r--r--  nova/console/vmrc.py | 148
-rw-r--r--  nova/hacking/checks.py | 31
-rw-r--r--  nova/image/glance.py | 8
-rw-r--r--  nova/network/neutronv2/api.py | 11
-rw-r--r--  nova/network/security_group/neutron_driver.py | 14
-rw-r--r--  nova/openstack/common/middleware/__init__.py | 0
-rw-r--r--  nova/openstack/common/middleware/base.py | 56
-rw-r--r--  nova/openstack/common/middleware/request_id.py | 44
-rw-r--r--  nova/pci/device.py (renamed from nova/pci/pci_device.py) | 0
-rw-r--r-- [-rwxr-xr-x]  nova/pci/devspec.py (renamed from nova/pci/pci_devspec.py) | 14
-rw-r--r--  nova/pci/manager.py (renamed from nova/pci/pci_manager.py) | 18
-rw-r--r--  nova/pci/request.py (renamed from nova/pci/pci_request.py) | 6
-rw-r--r--  nova/pci/stats.py (renamed from nova/pci/pci_stats.py) | 8
-rw-r--r--  nova/pci/utils.py (renamed from nova/pci/pci_utils.py) | 0
-rw-r--r--  nova/pci/whitelist.py (renamed from nova/pci/pci_whitelist.py) | 4
-rw-r--r--  nova/scheduler/filter_scheduler.py | 11
-rw-r--r--  nova/scheduler/host_manager.py | 2
-rw-r--r--  nova/service.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py | 28
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py | 6
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py | 6
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py | 39
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_networks.py | 47
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py | 10
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_security_groups.py | 26
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_tenant_networks.py | 45
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_pci.py | 6
-rw-r--r--  nova/tests/api/openstack/test_common.py | 3
-rw-r--r--  nova/tests/api/test_auth.py | 2
-rw-r--r--  nova/tests/compute/test_claims.py | 4
-rw-r--r--  nova/tests/compute/test_compute.py | 8
-rw-r--r--  nova/tests/compute/test_hvtype.py | 3
-rw-r--r--  nova/tests/compute/test_keypairs.py | 5
-rw-r--r--  nova/tests/compute/test_resource_tracker.py | 2
-rw-r--r--  nova/tests/conductor/test_conductor.py | 29
-rw-r--r--  nova/tests/consoleauth/test_rpcapi.py | 2
-rw-r--r--  nova/tests/db/test_db_api.py | 2
-rw-r--r--  nova/tests/fake_network.py | 2
-rw-r--r--  nova/tests/fake_policy.py | 1
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl | 6
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl | 5
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl | 6
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl | 5
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl | 9
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl | 9
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl | 11
-rw-r--r--  nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl | 11
-rw-r--r--  nova/tests/integrated/v3/test_cloudpipe.py | 9
-rw-r--r--  nova/tests/integrated/v3/test_security_groups.py | 113
-rw-r--r--  nova/tests/network/test_neutronv2.py | 4
-rw-r--r--  nova/tests/pci/fakes.py (renamed from nova/tests/pci/pci_fakes.py) | 4
-rw-r--r--  nova/tests/pci/test_device.py (renamed from nova/tests/pci/test_pci_device.py) | 47
-rw-r--r--  nova/tests/pci/test_devspec.py (renamed from nova/tests/pci/test_pci_devspec.py) | 81
-rw-r--r--  nova/tests/pci/test_manager.py (renamed from nova/tests/pci/test_pci_manager.py) | 16
-rw-r--r--  nova/tests/pci/test_request.py (renamed from nova/tests/pci/test_pci_request.py) | 24
-rw-r--r--  nova/tests/pci/test_stats.py (renamed from nova/tests/pci/test_pci_stats.py) | 18
-rw-r--r--  nova/tests/pci/test_utils.py (renamed from nova/tests/pci/test_pci_utils.py) | 16
-rw-r--r--  nova/tests/pci/test_whitelist.py (renamed from nova/tests/pci/test_pci_whitelist.py) | 14
-rw-r--r--  nova/tests/scheduler/filters/__init__.py | 0
-rw-r--r--  nova/tests/scheduler/filters/test_affinity_filters.py | 185
-rw-r--r--  nova/tests/scheduler/filters/test_disk_filters.py | 57
-rw-r--r--  nova/tests/scheduler/filters/test_extra_specs_ops.py | 200
-rw-r--r--  nova/tests/scheduler/filters/test_ram_filters.py | 89
-rw-r--r--  nova/tests/scheduler/test_filter_scheduler.py | 19
-rw-r--r--  nova/tests/scheduler/test_host_filters.py | 497
-rw-r--r--  nova/tests/test_exception.py | 14
-rw-r--r--  nova/tests/test_hacking.py | 34
-rw-r--r--  nova/tests/test_metadata.py | 22
-rw-r--r--  nova/tests/virt/libvirt/test_config.py | 14
-rw-r--r--  nova/tests/virt/libvirt/test_driver.py | 108
-rw-r--r--  nova/tests/virt/vmwareapi/stubs.py | 6
-rw-r--r--  nova/tests/virt/vmwareapi/test_configdrive.py | 4
-rw-r--r--  nova/tests/virt/vmwareapi/test_driver_api.py | 34
-rw-r--r--  nova/tests/virt/vmwareapi/test_images.py (renamed from nova/tests/virt/vmwareapi/test_vmware_images.py) | 26
-rw-r--r--  nova/tests/virt/vmwareapi/test_vm_util.py | 4
-rw-r--r--  nova/tests/virt/vmwareapi/test_vmops.py | 18
-rw-r--r--  nova/tests/virt/xenapi/test_vmops.py | 2
-rw-r--r--  nova/virt/libvirt/config.py | 10
-rw-r--r--  nova/virt/libvirt/designer.py | 2
-rw-r--r--  nova/virt/libvirt/driver.py | 165
-rw-r--r--  nova/virt/libvirt/imagebackend.py | 4
-rw-r--r--  nova/virt/vmwareapi/__init__.py | 4
-rw-r--r--  nova/virt/vmwareapi/driver.py | 19
-rw-r--r--  nova/virt/vmwareapi/error_util.py | 14
-rw-r--r--  nova/virt/vmwareapi/images.py (renamed from nova/virt/vmwareapi/vmware_images.py) | 2
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 10
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 12
-rw-r--r--  nova/virt/xenapi/host.py | 2
-rw-r--r--  nova/virt/xenapi/vmops.py | 2
-rw-r--r--  nova/volume/cinder.py | 2
-rw-r--r--  requirements.txt | 1
-rw-r--r--  setup.cfg | 1
135 files changed, 2166 insertions(+), 1227 deletions(-)
diff --git a/HACKING.rst b/HACKING.rst
index f1a7f1ed1b..0fb08e6670 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -37,7 +37,7 @@ Nova Specific Commandments
- [N322] Method's default argument shouldn't be mutable
- [N323] Ensure that the _() function is explicitly imported to ensure proper translations.
- [N324] Ensure that jsonutils.%(fun)s must be used instead of json.%(fun)s
-- [N325] str() cannot be used on an exception. Remove use or use six.text_type()
+- [N325] str() and unicode() cannot be used on an exception. Remove use or use six.text_type()
- [N326] Translated messages cannot be concatenated. String should be included in translated message.
- [N327] assert_called_once() is not a valid method
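A minimal illustration of the N325 rule above (the helper name is hypothetical, not part of this change): formatting exceptions through six.text_type() stays portable across Python 2 and 3, which is why the hunks below drop unicode(ex) in favour of six.text_type(ex) or of passing the exception straight to the logger.

    import six

    def describe_failure(exc):
        # N325: don't call str()/unicode() on the exception itself;
        # six.text_type() yields a unicode message on both Python 2 and 3.
        return "Operation failed: %s" % six.text_type(exc)

    print(describe_failure(ValueError(u"bad value")))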
diff --git a/doc/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json b/doc/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json
new file mode 100644
index 0000000000..7882765b48
--- /dev/null
+++ b/doc/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json
@@ -0,0 +1,6 @@
+{
+ "configure_project": {
+ "vpn_ip": "192.168.1.1",
+ "vpn_port": "2000"
+ }
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/security-group-add-post-req.json b/doc/v3/api_samples/os-security-groups/security-group-add-post-req.json
new file mode 100644
index 0000000000..6b88a23392
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/security-group-add-post-req.json
@@ -0,0 +1,5 @@
+{
+ "addSecurityGroup": {
+ "name": "test"
+ }
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/security-group-post-req.json b/doc/v3/api_samples/os-security-groups/security-group-post-req.json
new file mode 100644
index 0000000000..0951408c59
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/security-group-post-req.json
@@ -0,0 +1,6 @@
+{
+ "security_group": {
+ "name": "test",
+ "description": "description"
+ }
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/security-group-remove-post-req.json b/doc/v3/api_samples/os-security-groups/security-group-remove-post-req.json
new file mode 100644
index 0000000000..a6b2e09866
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/security-group-remove-post-req.json
@@ -0,0 +1,5 @@
+{
+ "removeSecurityGroup": {
+ "name": "test"
+ }
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/security-groups-create-resp.json b/doc/v3/api_samples/os-security-groups/security-groups-create-resp.json
new file mode 100644
index 0000000000..1b134d7908
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/security-groups-create-resp.json
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/security-groups-get-resp.json b/doc/v3/api_samples/os-security-groups/security-groups-get-resp.json
new file mode 100644
index 0000000000..1b134d7908
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/security-groups-get-resp.json
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/security-groups-list-get-resp.json b/doc/v3/api_samples/os-security-groups/security-groups-list-get-resp.json
new file mode 100644
index 0000000000..a5c33e6995
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/security-groups-list-get-resp.json
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/server-get-resp.json b/doc/v3/api_samples/os-security-groups/server-get-resp.json
index ab8e4cbcdc..1cc9bb4e0f 100644
--- a/doc/v3/api_samples/os-security-groups/server-get-resp.json
+++ b/doc/v3/api_samples/os-security-groups/server-get-resp.json
@@ -10,7 +10,7 @@
}
]
},
- "created": "2013-09-25T03:29:13Z",
+ "created": "2014-09-18T10:13:33Z",
"flavor": {
"id": "1",
"links": [
@@ -20,8 +20,8 @@
}
]
},
- "hostId": "0e312d6763795d572ccd716973fd078290d9ec446517b222d3395660",
- "id": "f6961f7a-0133-4f27-94cd-901dca4ba426",
+ "hostId": "24451d49cba30e60300a5b928ebc93a2d0b43c084a677b0a14fd678b",
+ "id": "b08eb8d8-db43-44fb-bd89-dfe3302b84ef",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -34,11 +34,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v3/servers/f6961f7a-0133-4f27-94cd-901dca4ba426",
+ "href": "http://openstack.example.com/v3/servers/b08eb8d8-db43-44fb-bd89-dfe3302b84ef",
"rel": "self"
},
{
- "href": "http://openstack.example.com/servers/f6961f7a-0133-4f27-94cd-901dca4ba426",
+ "href": "http://openstack.example.com/servers/b08eb8d8-db43-44fb-bd89-dfe3302b84ef",
"rel": "bookmark"
}
],
@@ -54,7 +54,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2013-09-25T03:29:14Z",
+ "updated": "2014-09-18T10:13:34Z",
"user_id": "fake"
}
}
diff --git a/doc/v3/api_samples/os-security-groups/server-post-req.json b/doc/v3/api_samples/os-security-groups/server-post-req.json
index fc8bd8c76b..5a5648d80f 100644
--- a/doc/v3/api_samples/os-security-groups/server-post-req.json
+++ b/doc/v3/api_samples/os-security-groups/server-post-req.json
@@ -8,4 +8,4 @@
},
"security_groups": [{"name": "test"}]
}
-}
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/server-post-resp.json b/doc/v3/api_samples/os-security-groups/server-post-resp.json
index 07a797ff4a..3f69ad31d3 100644
--- a/doc/v3/api_samples/os-security-groups/server-post-resp.json
+++ b/doc/v3/api_samples/os-security-groups/server-post-resp.json
@@ -1,14 +1,14 @@
{
"server": {
- "adminPass": "ki8cbWeZdxH6",
- "id": "2dabdd93-ced7-4607-a542-2516de84e0e5",
+ "adminPass": "xhS2khTdkRkT",
+ "id": "60874907-c72b-4a01-805d-54c992510e47",
"links": [
{
- "href": "http://openstack.example.com/v3/servers/2dabdd93-ced7-4607-a542-2516de84e0e5",
+ "href": "http://openstack.example.com/v3/servers/60874907-c72b-4a01-805d-54c992510e47",
"rel": "self"
},
{
- "href": "http://openstack.example.com/servers/2dabdd93-ced7-4607-a542-2516de84e0e5",
+ "href": "http://openstack.example.com/servers/60874907-c72b-4a01-805d-54c992510e47",
"rel": "bookmark"
}
],
@@ -18,4 +18,4 @@
}
]
}
-}
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/server-security-groups-list-resp.json b/doc/v3/api_samples/os-security-groups/server-security-groups-list-resp.json
new file mode 100644
index 0000000000..a5c33e6995
--- /dev/null
+++ b/doc/v3/api_samples/os-security-groups/server-security-groups-list-resp.json
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-security-groups/servers-detail-resp.json b/doc/v3/api_samples/os-security-groups/servers-detail-resp.json
index 8612a18348..240a930e57 100644
--- a/doc/v3/api_samples/os-security-groups/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-security-groups/servers-detail-resp.json
@@ -11,7 +11,7 @@
}
]
},
- "created": "2013-09-25T03:29:11Z",
+ "created": "2014-09-18T10:13:33Z",
"flavor": {
"id": "1",
"links": [
@@ -21,8 +21,8 @@
}
]
},
- "hostId": "afeeb125d4d37d0a2123e3144a20a6672fda5d4b6cb85ec193430d82",
- "id": "1b94e3fc-1b1c-431a-a077-6b280fb720ce",
+ "hostId": "2ab794bccd321fe64f9f8b679266aa2c96825f467434bbdd71b09b1d",
+ "id": "d182742c-6f20-479c-8e32-f79f9c9df6e3",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -35,11 +35,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v3/servers/1b94e3fc-1b1c-431a-a077-6b280fb720ce",
+ "href": "http://openstack.example.com/v3/servers/d182742c-6f20-479c-8e32-f79f9c9df6e3",
"rel": "self"
},
{
- "href": "http://openstack.example.com/servers/1b94e3fc-1b1c-431a-a077-6b280fb720ce",
+ "href": "http://openstack.example.com/servers/d182742c-6f20-479c-8e32-f79f9c9df6e3",
"rel": "bookmark"
}
],
@@ -55,7 +55,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2013-09-25T03:29:12Z",
+ "updated": "2014-09-18T10:13:34Z",
"user_id": "fake"
}
]
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 1ba61c4269..f16e36cb01 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -81,7 +81,7 @@ noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
[filter:request_id]
-paste.filter_factory = oslo.middleware:RequestId.factory
+paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:compute_req_id]
paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 1d02dd8391..2ba387d4b7 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -65,6 +65,8 @@
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:v3:os-attach-interfaces:discoverable": "",
"compute_extension:baremetal_nodes": "rule:admin_api",
+ "compute_extension:v3:os-baremetal-nodes": "rule:admin_api",
+ "compute_extension:v3:os-baremetal-nodes:discoverable": "",
"compute_extension:v3:os-block-device-mapping-v1:discoverable": "",
"compute_extension:cells": "rule:admin_api",
"compute_extension:cells:create": "rule:admin_api",
diff --git a/nova/api/auth.py b/nova/api/auth.py
index 1c99037570..d6e6c1430c 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -17,7 +17,6 @@ Common Auth Middleware.
"""
from oslo.config import cfg
-from oslo.middleware import request_id
from oslo.serialization import jsonutils
import webob.dec
import webob.exc
@@ -26,6 +25,7 @@ from nova import context
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import log as logging
+from nova.openstack.common.middleware import request_id
from nova import wsgi
diff --git a/nova/api/compute_req_id.py b/nova/api/compute_req_id.py
index 711756e686..2118d96eb0 100644
--- a/nova/api/compute_req_id.py
+++ b/nova/api/compute_req_id.py
@@ -23,10 +23,10 @@ Responses for APIv3 are taken care of by the request_id middleware provided
in oslo.
"""
-from oslo.middleware import base
import webob.dec
from nova.openstack.common import context
+from nova.openstack.common.middleware import base
ENV_REQUEST_ID = 'openstack.request_id'
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index a76cab7c49..962c195a21 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -87,7 +87,7 @@ class FaultWrapper(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- LOG.exception(_("FaultWrapper: %s"), unicode(ex))
+ LOG.exception(_("FaultWrapper: %s"), ex)
return faults.Fault(webob.exc.HTTPInternalServerError())
@@ -321,7 +321,7 @@ class Requestify(wsgi.Middleware):
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
- raise webob.exc.HTTPBadRequest(explanation=unicode(err))
+ raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
LOG.debug('action: %s', action)
for key, value in args.items():
@@ -506,9 +506,9 @@ def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
- 'ex_str': unicode(ex)
+ 'ex_str': ex
}
- log_fun(log_msg % log_msg_args, context=context)
+ log_fun(log_msg, log_msg_args, context=context)
if ex.args and not message and (not unexpected or status < 500):
message = unicode(ex.args[0])
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 9ae64b7242..9862128ee0 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -133,7 +133,11 @@ class MetadataRequestHandler(wsgi.Application):
return data(req, meta_data)
resp = base.ec2_md_print(data)
- req.response.body = resp
+ if isinstance(resp, six.text_type):
+ req.response.text = resp
+ else:
+ req.response.body = resp
+
req.response.content_type = meta_data.get_mimetype()
return req.response
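The metadata handler hunk above distinguishes unicode payloads from byte payloads before assigning them to the webob response; a standalone sketch of the same check, with a hypothetical helper and payload:

    import six
    import webob

    def set_payload(response, payload):
        # webob uses .text for unicode bodies and .body for byte strings;
        # assigning a unicode string to .body raises a TypeError.
        if isinstance(payload, six.text_type):
            response.text = payload
        else:
            response.body = payload

    resp = webob.Response()
    set_payload(resp, u'local-ipv4')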
diff --git a/nova/api/openstack/compute/contrib/agents.py b/nova/api/openstack/compute/contrib/agents.py
index c05eb4ef2f..70c6874dd8 100644
--- a/nova/api/openstack/compute/contrib/agents.py
+++ b/nova/api/openstack/compute/contrib/agents.py
@@ -97,7 +97,7 @@ class AgentController(object):
md5hash = para['md5hash']
version = para['version']
except (TypeError, KeyError) as ex:
- msg = _("Invalid request body: %s") % unicode(ex)
+ msg = _("Invalid request body: %s") % ex
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
@@ -115,7 +115,7 @@ class AgentController(object):
agent.md5hash = md5hash
agent.save()
except ValueError as ex:
- msg = _("Invalid request body: %s") % unicode(ex)
+ msg = _("Invalid request body: %s") % ex
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.AgentBuildNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
@@ -153,7 +153,7 @@ class AgentController(object):
url = agent['url']
md5hash = agent['md5hash']
except (TypeError, KeyError) as ex:
- msg = _("Invalid request body: %s") % unicode(ex)
+ msg = _("Invalid request body: %s") % ex
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index 0f05f59c5c..abae9bf06e 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -81,8 +81,8 @@ class CloudpipeController(object):
if pipelib.is_vpn_image(instance['image_ref'])
and instance['vm_state'] != vm_states.DELETED]
- def _get_cloudpipe_for_project(self, context, project_id):
- """Get the cloudpipe instance for a project ID."""
+ def _get_cloudpipe_for_project(self, context):
+ """Get the cloudpipe instance for a project from context."""
cloudpipes = self._get_all_cloudpipes(context) or [None]
return cloudpipes[0]
@@ -143,7 +143,7 @@ class CloudpipeController(object):
context.user_id = 'project-vpn'
context.is_admin = False
context.roles = []
- instance = self._get_cloudpipe_for_project(context, project_id)
+ instance = self._get_cloudpipe_for_project(context)
if not instance:
try:
result = self.cloudpipe.launch_vpn_instance(context)
diff --git a/nova/api/openstack/compute/contrib/cloudpipe_update.py b/nova/api/openstack/compute/contrib/cloudpipe_update.py
index 662915ba8e..a00229894a 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe_update.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe_update.py
@@ -53,7 +53,7 @@ class CloudpipeUpdateController(wsgi.Controller):
network.vpn_public_port = vpn_port
network.save()
except (TypeError, KeyError, ValueError) as ex:
- msg = _("Invalid request body: %s") % unicode(ex)
+ msg = _("Invalid request body: %s") % ex
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
diff --git a/nova/api/openstack/compute/contrib/security_group_default_rules.py b/nova/api/openstack/compute/contrib/security_group_default_rules.py
index 30ed7d094f..08dabca889 100644
--- a/nova/api/openstack/compute/contrib/security_group_default_rules.py
+++ b/nova/api/openstack/compute/contrib/security_group_default_rules.py
@@ -13,6 +13,7 @@
# under the License.
from xml.dom import minidom
+import six
import webob
from webob import exc
@@ -119,7 +120,7 @@ class SecurityGroupDefaultRulesController(sg.SecurityGroupControllerBase):
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'))
except Exception as exp:
- raise exc.HTTPBadRequest(explanation=unicode(exp))
+ raise exc.HTTPBadRequest(explanation=six.text_type(exp))
if values is None:
msg = _('Not enough parameters to build a valid rule.')
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index 43ba3ca8b1..52fa1e6c3d 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -20,6 +20,7 @@ import contextlib
from xml.dom import minidom
from oslo.serialization import jsonutils
+import six
import webob
from webob import exc
@@ -390,7 +391,7 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
- raise exc.HTTPBadRequest(explanation=unicode(exp))
+ raise exc.HTTPBadRequest(explanation=six.text_type(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index c1b13fd792..8b0c1d6c3b 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -17,6 +17,7 @@ import datetime
import iso8601
from oslo.utils import timeutils
+import six
import six.moves.urllib.parse as urlparse
from webob import exc
@@ -55,7 +56,7 @@ def parse_strtime(dstr, fmt):
try:
return timeutils.parse_strtime(dstr, fmt)
except (TypeError, ValueError) as e:
- raise exception.InvalidStrTime(reason=unicode(e))
+ raise exception.InvalidStrTime(reason=six.text_type(e))
class SimpleTenantUsageTemplate(xmlutil.TemplateBuilder):
diff --git a/nova/api/openstack/compute/plugins/__init__.py b/nova/api/openstack/compute/plugins/__init__.py
index 73857e2541..71cbe00e9d 100644
--- a/nova/api/openstack/compute/plugins/__init__.py
+++ b/nova/api/openstack/compute/plugins/__init__.py
@@ -48,7 +48,7 @@ class LoadedExtensionInfo(object):
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext version: %i', extension.version)
except AttributeError as ex:
- LOG.exception(_("Exception loading extension: %s"), unicode(ex))
+ LOG.exception(_("Exception loading extension: %s"), ex)
return False
return True
diff --git a/nova/api/openstack/compute/plugins/v3/baremetal_nodes.py b/nova/api/openstack/compute/plugins/v3/baremetal_nodes.py
new file mode 100644
index 0000000000..3582e88090
--- /dev/null
+++ b/nova/api/openstack/compute/plugins/v3/baremetal_nodes.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2013 NTT DOCOMO, INC.
+# Copyright 2014 IBM Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The bare-metal admin extension."""
+
+from oslo.config import cfg
+from oslo.utils import importutils
+import webob
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.i18n import _
+
+ironic_client = importutils.try_import('ironicclient.client')
+
+CONF = cfg.CONF
+ALIAS = "os-baremetal-nodes"
+authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
+
+node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
+ 'pm_user', 'service_host', 'terminal_port', 'instance_uuid']
+
+node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
+
+interface_fields = ['id', 'address', 'datapath_id', 'port_no']
+
+CONF.import_opt('api_version',
+ 'nova.virt.ironic.driver',
+ group='ironic')
+CONF.import_opt('api_endpoint',
+ 'nova.virt.ironic.driver',
+ group='ironic')
+CONF.import_opt('admin_username',
+ 'nova.virt.ironic.driver',
+ group='ironic')
+CONF.import_opt('admin_password',
+ 'nova.virt.ironic.driver',
+ group='ironic')
+CONF.import_opt('admin_tenant_name',
+ 'nova.virt.ironic.driver',
+ group='ironic')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+def _interface_dict(interface_ref):
+ d = {}
+ for f in interface_fields:
+ d[f] = interface_ref.get(f)
+ return d
+
+
+def _get_ironic_client():
+ """return an Ironic client."""
+ # TODO(NobodyCam): Fix insecure setting
+ kwargs = {'os_username': CONF.ironic.admin_username,
+ 'os_password': CONF.ironic.admin_password,
+ 'os_auth_url': CONF.ironic.admin_url,
+ 'os_tenant_name': CONF.ironic.admin_tenant_name,
+ 'os_service_type': 'baremetal',
+ 'os_endpoint_type': 'public',
+ 'insecure': 'true',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ icli = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
+ return icli
+
+
+def _no_ironic_proxy(cmd):
+ raise webob.exc.HTTPBadRequest(
+ explanation=_("Command Not supported. Please use Ironic "
+ "command %(cmd)s to perform this "
+ "action.") % {'cmd': cmd})
+
+
+class BareMetalNodeController(wsgi.Controller):
+ """The Bare-Metal Node API controller for the OpenStack API."""
+
+ def _node_dict(self, node_ref):
+ d = {}
+ for f in node_fields:
+ d[f] = node_ref.get(f)
+ for f in node_ext_fields:
+ d[f] = node_ref.get(f)
+ return d
+
+ @extensions.expected_errors(404)
+ def index(self, req):
+ context = req.environ['nova.context']
+ authorize(context)
+ nodes = []
+ # proxy command to Ironic
+ icli = _get_ironic_client()
+ ironic_nodes = icli.node.list(detail=True)
+ for inode in ironic_nodes:
+ node = {'id': inode.uuid,
+ 'interfaces': [],
+ 'host': 'IRONIC MANAGED',
+ 'task_state': inode.provision_state,
+ 'cpus': inode.properties['cpus'],
+ 'memory_mb': inode.properties['memory_mb'],
+ 'disk_gb': inode.properties['local_gb']}
+ nodes.append(node)
+ return {'nodes': nodes}
+
+ @extensions.expected_errors(404)
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ # proxy command to Ironic
+ icli = _get_ironic_client()
+ inode = icli.node.get(id)
+ iports = icli.node.list_ports(id)
+ node = {'id': inode.uuid,
+ 'interfaces': [],
+ 'host': 'IRONIC MANAGED',
+ 'task_state': inode.provision_state,
+ 'cpus': inode.properties['cpus'],
+ 'memory_mb': inode.properties['memory_mb'],
+ 'disk_gb': inode.properties['local_gb'],
+ 'instance_uuid': inode.instance_uuid}
+ for port in iports:
+ node['interfaces'].append({'address': port.address})
+ return {'node': node}
+
+ @extensions.expected_errors(400)
+ def create(self, req, body):
+ _no_ironic_proxy("port-create")
+
+ @extensions.expected_errors(400)
+ def delete(self, req, id):
+ _no_ironic_proxy("port-create")
+
+ @wsgi.action('add_interface')
+ @extensions.expected_errors(400)
+ def _add_interface(self, req, id, body):
+ _no_ironic_proxy("port-create")
+
+ @wsgi.action('remove_interface')
+ @extensions.expected_errors(400)
+ def _remove_interface(self, req, id, body):
+ _no_ironic_proxy("port-delete")
+
+
+class BareMetalNodes(extensions.V3APIExtensionBase):
+ """Admin-only bare-metal node administration."""
+
+ name = "BareMetalNodes"
+ alias = ALIAS
+ version = 1
+
+ def get_resources(self):
+ resource = [extensions.ResourceExtension(ALIAS,
+ BareMetalNodeController(),
+ member_actions={"action": "POST"})]
+ return resource
+
+ def get_controller_extensions(self):
+ """It's an abstract function V3APIExtensionBase and the extension
+ will not be loaded without it.
+ """
+ return []
diff --git a/nova/api/openstack/compute/plugins/v3/block_device_mapping.py b/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
index ef54f064df..b0794a63cb 100644
--- a/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
+++ b/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
@@ -53,6 +53,10 @@ class BlockDeviceMapping(extensions.V3APIExtensionBase):
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
+ if not isinstance(bdm, list):
+ msg = _('block_device_mapping_v2 must be a list')
+ raise exc.HTTPBadRequest(explanation=msg)
+
try:
block_device_mapping = [
block_device.BlockDeviceDict.from_api(bdm_dict)
diff --git a/nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py b/nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py
index 187ab61b84..f9c5b2b906 100644
--- a/nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py
+++ b/nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py
@@ -54,6 +54,10 @@ class BlockDeviceMappingV1(extensions.V3APIExtensionBase):
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
+ if not isinstance(block_device_mapping, list):
+ msg = _('block_device_mapping must be a list')
+ raise exc.HTTPBadRequest(explanation=msg)
+
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
diff --git a/nova/api/openstack/compute/plugins/v3/cloudpipe.py b/nova/api/openstack/compute/plugins/v3/cloudpipe.py
index a0eb192829..6d57d15c72 100644
--- a/nova/api/openstack/compute/plugins/v3/cloudpipe.py
+++ b/nova/api/openstack/compute/plugins/v3/cloudpipe.py
@@ -18,8 +18,10 @@ from oslo.config import cfg
from oslo.utils import timeutils
from webob import exc
+from nova.api.openstack.compute.schemas.v3 import cloudpipe
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.api import validation
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import utils as compute_utils
@@ -27,6 +29,7 @@ from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import network
+from nova import objects
from nova.openstack.common import fileutils
from nova import utils
@@ -61,8 +64,8 @@ class CloudpipeController(wsgi.Controller):
if pipelib.is_vpn_image(instance['image_ref'])
and instance['vm_state'] != vm_states.DELETED]
- def _get_cloudpipe_for_project(self, context, project_id):
- """Get the cloudpipe instance for a project ID."""
+ def _get_cloudpipe_for_project(self, context):
+ """Get the cloudpipe instance for a project from context."""
cloudpipes = self._get_all_cloudpipes(context) or [None]
return cloudpipes[0]
@@ -123,7 +126,7 @@ class CloudpipeController(wsgi.Controller):
context.user_id = 'project-vpn'
context.is_admin = False
context.roles = []
- instance = self._get_cloudpipe_for_project(context, project_id)
+ instance = self._get_cloudpipe_for_project(context)
if not instance:
try:
result = self.cloudpipe.launch_vpn_instance(context)
@@ -143,6 +146,30 @@ class CloudpipeController(wsgi.Controller):
for x in self._get_all_cloudpipes(context)]
return {'cloudpipes': vpns}
+ @wsgi.response(202)
+ @extensions.expected_errors(400)
+ @validation.schema(cloudpipe.update)
+ def update(self, req, id, body):
+ """Configure cloudpipe parameters for the project."""
+
+ context = req.environ['nova.context']
+ authorize(context)
+
+ if id != "configure-project":
+ msg = _("Unknown action %s") % id
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ project_id = context.project_id
+ networks = objects.NetworkList.get_by_project(context, project_id)
+
+ params = body['configure_project']
+ vpn_ip = params['vpn_ip']
+ vpn_port = params['vpn_port']
+ for nw in networks:
+ nw.vpn_public_address = vpn_ip
+ nw.vpn_public_port = vpn_port
+ nw.save()
+
class Cloudpipe(extensions.V3APIExtensionBase):
"""Adds actions to create cloudpipe instances.
diff --git a/nova/api/openstack/compute/plugins/v3/security_groups.py b/nova/api/openstack/compute/plugins/v3/security_groups.py
index 38020abf9b..e48750842a 100644
--- a/nova/api/openstack/compute/plugins/v3/security_groups.py
+++ b/nova/api/openstack/compute/plugins/v3/security_groups.py
@@ -15,9 +15,12 @@
# under the License.
"""The security groups extension."""
+import contextlib
from oslo.serialization import jsonutils
+from webob import exc
+from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import security_groups as \
schema_security_groups
from nova.api.openstack import extensions
@@ -25,10 +28,13 @@ from nova.api.openstack import wsgi
from nova import compute
from nova.compute import api as compute_api
from nova import exception
+from nova.i18n import _
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
+from nova.openstack.common import log as logging
+LOG = logging.getLogger(__name__)
ALIAS = 'os-security-groups'
ATTRIBUTE_NAME = 'security_groups'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
@@ -41,6 +47,263 @@ def _authorize_context(req):
return context
+@contextlib.contextmanager
+def translate_exceptions():
+ """Translate nova exceptions to http exceptions."""
+ try:
+ yield
+ except exception.Invalid as exp:
+ msg = exp.format_message()
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.SecurityGroupNotFound as exp:
+ msg = exp.format_message()
+ raise exc.HTTPNotFound(explanation=msg)
+ except exception.InstanceNotFound as exp:
+ msg = exp.format_message()
+ raise exc.HTTPNotFound(explanation=msg)
+ except exception.SecurityGroupLimitExceeded as exp:
+ msg = exp.format_message()
+ raise exc.HTTPForbidden(explanation=msg)
+ except exception.NoUniqueMatch as exp:
+ msg = exp.format_message()
+ raise exc.HTTPConflict(explanation=msg)
+
+
+class SecurityGroupControllerBase(wsgi.Controller):
+ """Base class for Security Group controllers."""
+
+ def __init__(self):
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+ self.compute_api = compute.API(
+ security_group_api=self.security_group_api)
+
+ def _format_security_group_rule(self, context, rule, group_rule_data=None):
+ """Return a secuity group rule in desired API response format.
+
+ If group_rule_data is passed in that is used rather than querying
+ for it.
+ """
+ sg_rule = {}
+ sg_rule['id'] = rule['id']
+ sg_rule['parent_group_id'] = rule['parent_group_id']
+ sg_rule['ip_protocol'] = rule['protocol']
+ sg_rule['from_port'] = rule['from_port']
+ sg_rule['to_port'] = rule['to_port']
+ sg_rule['group'] = {}
+ sg_rule['ip_range'] = {}
+ if rule['group_id']:
+ with translate_exceptions():
+ try:
+ source_group = self.security_group_api.get(
+ context, id=rule['group_id'])
+ except exception.SecurityGroupNotFound:
+ # NOTE(arosen): There is a possible race condition that can
+ # occur here if two api calls occur concurrently: one that
+ # lists the security groups and another one that deletes a
+ # security group rule that has a group_id before the
+ # group_id is fetched. To handle this if
+ # SecurityGroupNotFound is raised we return None instead
+ # of the rule and the caller should ignore the rule.
+ LOG.debug("Security Group ID %s does not exist",
+ rule['group_id'])
+ return
+ sg_rule['group'] = {'name': source_group.get('name'),
+ 'tenant_id': source_group.get('project_id')}
+ elif group_rule_data:
+ sg_rule['group'] = group_rule_data
+ else:
+ sg_rule['ip_range'] = {'cidr': rule['cidr']}
+ return sg_rule
+
+ def _format_security_group(self, context, group):
+ security_group = {}
+ security_group['id'] = group['id']
+ security_group['description'] = group['description']
+ security_group['name'] = group['name']
+ security_group['tenant_id'] = group['project_id']
+ security_group['rules'] = []
+ for rule in group['rules']:
+ formatted_rule = self._format_security_group_rule(context, rule)
+ if formatted_rule:
+ security_group['rules'] += [formatted_rule]
+ return security_group
+
+ def _from_body(self, body, key):
+ if not body:
+ raise exc.HTTPBadRequest(
+ explanation=_("The request body can't be empty"))
+ value = body.get(key, None)
+ if value is None:
+ raise exc.HTTPBadRequest(
+ explanation=_("Missing parameter %s") % key)
+ return value
+
+
+class SecurityGroupController(SecurityGroupControllerBase):
+ """The Security group API controller for the OpenStack API."""
+
+ def show(self, req, id):
+ """Return data about the given security group."""
+ context = _authorize_context(req)
+
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ security_group = self.security_group_api.get(context, None, id,
+ map_exception=True)
+
+ return {'security_group': self._format_security_group(context,
+ security_group)}
+
+ @wsgi.response(202)
+ def delete(self, req, id):
+ """Delete a security group."""
+ context = _authorize_context(req)
+
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ security_group = self.security_group_api.get(context, None, id,
+ map_exception=True)
+ self.security_group_api.destroy(context, security_group)
+
+ def index(self, req):
+ """Returns a list of security groups."""
+ context = _authorize_context(req)
+
+ search_opts = {}
+ search_opts.update(req.GET)
+
+ with translate_exceptions():
+ project_id = context.project_id
+ raw_groups = self.security_group_api.list(context,
+ project=project_id,
+ search_opts=search_opts)
+
+ limited_list = common.limited(raw_groups, req)
+ result = [self._format_security_group(context, group)
+ for group in limited_list]
+
+ return {'security_groups':
+ list(sorted(result,
+ key=lambda k: (k['tenant_id'], k['name'])))}
+
+ def create(self, req, body):
+ """Creates a new security group."""
+ context = _authorize_context(req)
+
+ security_group = self._from_body(body, 'security_group')
+
+ group_name = security_group.get('name', None)
+ group_description = security_group.get('description', None)
+
+ with translate_exceptions():
+ self.security_group_api.validate_property(group_name, 'name', None)
+ self.security_group_api.validate_property(group_description,
+ 'description', None)
+ group_ref = self.security_group_api.create_security_group(
+ context, group_name, group_description)
+
+ return {'security_group': self._format_security_group(context,
+ group_ref)}
+
+ def update(self, req, id, body):
+ """Update a security group."""
+ context = _authorize_context(req)
+
+ with translate_exceptions():
+ id = self.security_group_api.validate_id(id)
+ security_group = self.security_group_api.get(context, None, id,
+ map_exception=True)
+
+ security_group_data = self._from_body(body, 'security_group')
+ group_name = security_group_data.get('name', None)
+ group_description = security_group_data.get('description', None)
+
+ with translate_exceptions():
+ self.security_group_api.validate_property(group_name, 'name', None)
+ self.security_group_api.validate_property(group_description,
+ 'description', None)
+ group_ref = self.security_group_api.update_security_group(
+ context, security_group, group_name, group_description)
+
+ return {'security_group': self._format_security_group(context,
+ group_ref)}
+
+
+class ServerSecurityGroupController(SecurityGroupControllerBase):
+
+ def index(self, req, server_id):
+ """Returns a list of security groups for the given instance."""
+ context = _authorize_context(req)
+
+ self.security_group_api.ensure_default(context)
+
+ with translate_exceptions():
+ instance = self.compute_api.get(context, server_id)
+ groups = self.security_group_api.get_instance_security_groups(
+ context, instance['uuid'], True)
+
+ result = [self._format_security_group(context, group)
+ for group in groups]
+
+ return {'security_groups':
+ list(sorted(result,
+ key=lambda k: (k['tenant_id'], k['name'])))}
+
+
+class SecurityGroupActionController(wsgi.Controller):
+ def __init__(self, *args, **kwargs):
+ super(SecurityGroupActionController, self).__init__(*args, **kwargs)
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+ self.compute_api = compute.API(
+ security_group_api=self.security_group_api)
+
+ def _parse(self, body, action):
+ try:
+ body = body[action]
+ group_name = body['name']
+ except TypeError:
+ msg = _("Missing parameter dict")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except KeyError:
+ msg = _("Security group not specified")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if not group_name or group_name.strip() == '':
+ msg = _("Security group name cannot be empty")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return group_name
+
+ def _invoke(self, method, context, id, group_name):
+ with translate_exceptions():
+ instance = self.compute_api.get(context, id)
+ method(context, instance, group_name)
+
+ @wsgi.response(202)
+ @wsgi.action('addSecurityGroup')
+ def _addSecurityGroup(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+
+ group_name = self._parse(body, 'addSecurityGroup')
+
+ return self._invoke(self.security_group_api.add_to_instance,
+ context, id, group_name)
+
+ @wsgi.response(202)
+ @wsgi.action('removeSecurityGroup')
+ def _removeSecurityGroup(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+
+ group_name = self._parse(body, 'removeSecurityGroup')
+
+ return self._invoke(self.security_group_api.remove_from_instance,
+ context, id, group_name)
+
+
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
@@ -115,12 +378,20 @@ class SecurityGroups(extensions.V3APIExtensionBase):
version = 1
def get_controller_extensions(self):
- controller = SecurityGroupsOutputController()
- output = extensions.ControllerExtension(self, 'servers', controller)
- return [output]
+ secgrp_output_ext = extensions.ControllerExtension(
+ self, 'servers', SecurityGroupsOutputController())
+ secgrp_act_ext = extensions.ControllerExtension(
+ self, 'servers', SecurityGroupActionController())
+ return [secgrp_output_ext, secgrp_act_ext]
def get_resources(self):
- return []
+ secgrp_ext = extensions.ResourceExtension('os-security-groups',
+ SecurityGroupController())
+ server_secgrp_ext = extensions.ResourceExtension(
+ 'os-security-groups',
+ controller=ServerSecurityGroupController(),
+ parent=dict(member_name='server', collection_name='servers'))
+ return [secgrp_ext, server_secgrp_ext]
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py
index 767eef1adc..5aef949c1f 100644
--- a/nova/api/openstack/compute/plugins/v3/servers.py
+++ b/nova/api/openstack/compute/plugins/v3/servers.py
@@ -549,7 +549,7 @@ class ServersController(wsgi.Controller):
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
- msg = "UnicodeError: %s" % unicode(error)
+ msg = "UnicodeError: %s" % error
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
diff --git a/nova/api/openstack/compute/plugins/v3/simple_tenant_usage.py b/nova/api/openstack/compute/plugins/v3/simple_tenant_usage.py
index f672b484d3..a79153baed 100644
--- a/nova/api/openstack/compute/plugins/v3/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/plugins/v3/simple_tenant_usage.py
@@ -17,6 +17,7 @@ import datetime
import iso8601
from oslo.utils import timeutils
+import six
import six.moves.urllib.parse as urlparse
from webob import exc
@@ -37,7 +38,7 @@ def parse_strtime(dstr, fmt):
try:
return timeutils.parse_strtime(dstr, fmt)
except (TypeError, ValueError) as e:
- raise exception.InvalidStrTime(reason=unicode(e))
+ raise exception.InvalidStrTime(reason=six.text_type(e))
class SimpleTenantUsageController(object):
diff --git a/nova/api/openstack/compute/schemas/v3/cloudpipe.py b/nova/api/openstack/compute/schemas/v3/cloudpipe.py
new file mode 100644
index 0000000000..d4e0183772
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/cloudpipe.py
@@ -0,0 +1,39 @@
+# Copyright 2014 IBM Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.validation import parameter_types
+
+
+update = {
+ 'type': 'object',
+ 'properties': {
+ 'configure_project': {
+ 'type': 'object',
+ 'properties': {
+ 'vpn_ip': {
+ 'type': 'string',
+ 'oneOf': [
+ {'format': 'ipv4'},
+ {'format': 'ipv6'}
+ ],
+ },
+ 'vpn_port': parameter_types.tcp_udp_port,
+ },
+ 'required': ['vpn_ip', 'vpn_port'],
+ 'additionalProperties': False,
+ },
+ },
+ 'required': ['configure_project'],
+ 'additionalProperties': False,
+}
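A rough sketch of how the schema above behaves when exercised with raw jsonschema (in nova it is applied through the @validation.schema decorator shown in the cloudpipe plugin, which supplies its own format checking); the request body here is illustrative:

    import jsonschema

    from nova.api.openstack.compute.schemas.v3 import cloudpipe

    body = {"configure_project": {"vpn_ip": "192.168.1.1",
                                  "vpn_port": "2000",
                                  "bogus": "value"}}
    try:
        # 'required' and 'additionalProperties' are enforced even without
        # a format checker, so the unexpected 'bogus' key is rejected.
        jsonschema.Draft4Validator(cloudpipe.update).validate(body)
    except jsonschema.ValidationError:
        print("rejected: unexpected property in configure_project")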
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 92a0102238..7ac660226b 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -857,6 +857,9 @@ class Controller(wsgi.Controller):
legacy_bdm = True
if self.ext_mgr.is_loaded('os-volumes'):
block_device_mapping = server_dict.get('block_device_mapping', [])
+ if not isinstance(block_device_mapping, list):
+ msg = _('block_device_mapping must be a list')
+ raise exc.HTTPBadRequest(explanation=msg)
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
@@ -879,6 +882,10 @@ class Controller(wsgi.Controller):
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
+ if not isinstance(block_device_mapping_v2, list):
+ msg = _('block_device_mapping_v2 must be a list')
+ raise exc.HTTPBadRequest(explanation=msg)
+
# Assume legacy format
legacy_bdm = not bool(block_device_mapping_v2)
@@ -980,7 +987,7 @@ class Controller(wsgi.Controller):
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
- msg = "UnicodeError: %s" % unicode(error)
+ msg = "UnicodeError: %s" % error
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 8bb8d38bef..90702b93ef 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -239,7 +239,7 @@ class ExtensionManager(object):
LOG.debug('Ext namespace: %s', extension.namespace)
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError as ex:
- LOG.exception(_("Exception loading extension: %s"), unicode(ex))
+ LOG.exception(_("Exception loading extension: %s"), ex)
return False
return True
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index b830783927..4a8e514ee1 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -662,10 +662,10 @@ class ResourceExceptionHandler(object):
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
- LOG.info(_LI("Fault thrown: %s"), unicode(ex_value))
+ LOG.info(_LI("Fault thrown: %s"), ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
- LOG.info(_LI("HTTP exception thrown: %s"), unicode(ex_value))
+ LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
raise Fault(ex_value)
# We didn't handle the exception
diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py
index 5ddf116ad6..af8bb228c5 100644
--- a/nova/api/validation/parameter_types.py
+++ b/nova/api/validation/parameter_types.py
@@ -100,3 +100,9 @@ metadata = {
},
'additionalProperties': False
}
+
+
+mac_address = {
+ 'type': 'string',
+ 'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
+}
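A quick, illustrative check of the mac_address pattern added above, using plain re (the API layer applies it through the JSON-Schema validation machinery rather than directly):

    import re

    from nova.api.validation import parameter_types

    mac_re = re.compile(parameter_types.mac_address['pattern'])

    assert mac_re.match('52:54:00:12:34:56')
    assert mac_re.match('52-54-00-12-34-56') is None  # ':' separators only
    assert mac_re.match('52:54:00:12:34') is None     # six octets required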
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4780499955..c6bc424bb7 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -64,7 +64,7 @@ from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
-from nova.pci import pci_request
+from nova.pci import request as pci_request
import nova.policy
from nova import quota
from nova import rpc
diff --git a/nova/compute/hvtype.py b/nova/compute/hvtype.py
index d6b6d35dad..b559ca77b6 100644
--- a/nova/compute/hvtype.py
+++ b/nova/compute/hvtype.py
@@ -91,6 +91,9 @@ def canonicalize(name):
if newname == "xapi":
newname = XEN
+ elif newname == "powervm":
+ # TODO(mriedem): Remove the translation shim in the 2015.2 'L' release.
+ newname = PHYP
if not is_valid(newname):
raise exception.InvalidHypervisorVirtType(hvtype=name)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c289d61000..d0713b573f 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1487,7 +1487,7 @@ class ComputeManager(manager.Manager):
self._log_original_error(exc_info, instance.uuid)
raise exception.RescheduledException(
instance_uuid=instance.uuid,
- reason=unicode(exc_info[1]))
+ reason=six.text_type(exc_info[1]))
else:
# not re-scheduling, go to error:
raise exc_info[0], exc_info[1], exc_info[2]
@@ -2415,7 +2415,7 @@ class ComputeManager(manager.Manager):
exc_info = sys.exc_info()
LOG.warn(_LW('Failed to delete volume: %(volume_id)s due '
'to %(exc)s'), {'volume_id': bdm.volume_id,
- 'exc': unicode(exc)})
+ 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
@@ -3264,7 +3264,7 @@ class ComputeManager(manager.Manager):
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
- reason=_("Driver Error: %s") % unicode(e))
+ reason=_("Driver Error: %s") % e)
self.conductor_api.notify_usage_exists(context, instance,
current_period=True)
@@ -4055,7 +4055,7 @@ class ComputeManager(manager.Manager):
instance.system_metadata['old_vm_state'] = instance.vm_state
with self._error_out_instance_on_exception(context, instance,
- instance_state=instance['vm_state']):
+ instance_state=instance.vm_state):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
@@ -5432,6 +5432,16 @@ class ComputeManager(manager.Manager):
LOG.debug(msg, instance=instance)
continue
+ # race condition: This condition is hit when this method is
+ # called between the save of the migration record with a status of
+ # finished and the save of the instance object with a state of
+ # RESIZED. The migration record should not be set to error.
+ if instance.task_state == task_states.RESIZE_FINISH:
+ msg = ("Instance still resizing during resize "
+ "confirmation. Skipping.")
+ LOG.debug(msg, instance=instance)
+ continue
+
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
@@ -5933,7 +5943,7 @@ class ComputeManager(manager.Manager):
except Exception as e:
LOG.warning(_("Periodic reclaim failed to delete "
"instance: %s"),
- unicode(e), instance=instance)
+ e, instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
@@ -6044,7 +6054,7 @@ class ComputeManager(manager.Manager):
except Exception as e:
LOG.warning(_("Periodic cleanup failed to delete "
"instance: %s"),
- unicode(e), instance=instance)
+ e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index f1a3e90788..bbcadca757 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -36,7 +36,7 @@ from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
-from nova.pci import pci_manager
+from nova.pci import manager as pci_manager
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 93e6b18a5f..08de767a4e 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -281,7 +281,20 @@ class ConductorAPI(object):
def service_update(self, context, service, values):
service_p = jsonutils.to_primitive(service)
- cctxt = self.client.prepare()
+
+ # NOTE(jichenjc): If we're calling this periodically, it makes no
+ # sense for the RPC timeout to be more than the service
+ # report interval. The value 5 is only a reasonable threshold
+ # below which the interval is left untouched.
+ timeout = CONF.report_interval
+ if timeout and timeout > 5:
+ timeout -= 1
+
+ if timeout:
+ cctxt = self.client.prepare(timeout=timeout)
+ else:
+ cctxt = self.client.prepare()
+
return cctxt.call(context, 'service_update',
service=service_p, values=values)
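The timeout selection above keeps the periodic RPC call from outliving the service report interval. A small sketch of how the values map, consistent with the new conductor tests later in this change (the helper name is made up):

    def pick_timeout(report_interval):
        # Mirrors the logic in ConductorAPI.service_update: shave a second
        # off large intervals, pass small ones through unchanged, and fall
        # back to the client default when the interval is unset.
        timeout = report_interval
        if timeout and timeout > 5:
            timeout -= 1
        return timeout

    assert pick_timeout(10) == 9       # report_interval=10 -> timeout=9
    assert pick_timeout(3) == 3        # small interval used as-is
    assert pick_timeout(None) is None  # unset -> plain prepare() default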
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
deleted file mode 100644
index 2a4f84d676..0000000000
--- a/nova/console/vmrc.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""VMRC console drivers."""
-
-import base64
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova import exception
-from nova.i18n import _LW
-from nova.openstack.common import log as logging
-from nova.virt.vmwareapi import vim_util
-
-
-vmrc_opts = [
- cfg.IntOpt('console_vmrc_port',
- default=443,
- help="DEPRECATED. Port for VMware VMRC connections"),
- cfg.IntOpt('console_vmrc_error_retries',
- default=10,
- help="DEPRECATED. "
- "Number of retries for retrieving VMRC information"),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(vmrc_opts)
-LOG = logging.getLogger(__name__)
-
-
-class VMRCConsole(object):
- """VMRC console driver with ESX credentials."""
-
- def __init__(self):
- super(VMRCConsole, self).__init__()
- LOG.warning(_LW('The ESX driver has been removed! '
- 'This code will be removed in Kilo release!'))
-
- @property
- def console_type(self):
- return 'vmrc+credentials'
-
- def get_port(self, context):
- """Get available port for consoles."""
- return CONF.console_vmrc_port
-
- def setup_console(self, context, console):
- """Sets up console."""
- pass
-
- def teardown_console(self, context, console):
- """Tears down console."""
- pass
-
- def init_host(self):
- """Perform console initialization."""
- pass
-
- def fix_pool_password(self, password):
- """Encode password."""
- # TODO(sateesh): Encrypt pool password
- return password
-
- def generate_password(self, vim_session, pool, instance_name):
- """Returns VMRC Connection credentials.
-
- Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
-
- """
- username, password = pool['username'], pool['password']
- vms = vim_session._call_method(vim_util, 'get_objects',
- 'VirtualMachine', ['name', 'config.files.vmPathName'])
- vm_ds_path_name = None
- vm_ref = None
- for vm in vms:
- vm_name = None
- ds_path_name = None
- for prop in vm.propSet:
- if prop.name == 'name':
- vm_name = prop.val
- elif prop.name == 'config.files.vmPathName':
- ds_path_name = prop.val
- if vm_name == instance_name:
- vm_ref = vm.obj
- vm_ds_path_name = ds_path_name
- break
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance_name)
- json_data = jsonutils.dumps({'vm_id': vm_ds_path_name,
- 'username': username,
- 'password': password})
- return base64.b64encode(json_data)
-
- def is_otp(self):
- """Is one time password or not."""
- return False
-
-
-class VMRCSessionConsole(VMRCConsole):
- """VMRC console driver with VMRC One Time Sessions."""
-
- def __init__(self):
- super(VMRCSessionConsole, self).__init__()
- LOG.warning(_LW('This code will be removed in Kilo release!'))
-
- @property
- def console_type(self):
- return 'vmrc+session'
-
- def generate_password(self, vim_session, pool, instance_name):
- """Returns a VMRC Session.
-
- Return string is of the form '<VM MOID>:<VMRC Ticket>'.
-
- """
- vms = vim_session._call_method(vim_util, 'get_objects',
- 'VirtualMachine', ['name'])
- vm_ref = None
- for vm in vms:
- if vm.propSet[0].val == instance_name:
- vm_ref = vm.obj
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance_name)
- virtual_machine_ticket = vim_session._call_method(
- vim_session.vim,
- 'AcquireCloneTicket',
- vim_session.vim.get_service_content().sessionManager)
- json_data = jsonutils.dumps({'vm_id': str(vm_ref.value),
- 'username': virtual_machine_ticket,
- 'password': virtual_machine_ticket})
- return base64.b64encode(json_data)
-
- def is_otp(self):
- """Is one time password or not."""
- return True
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index 22133f579b..5212f96d7f 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -358,20 +358,21 @@ def check_assert_called_once(logical_line, filename):
yield (pos, msg)
-class CheckForStrExc(BaseASTChecker):
- """Checks for the use of str() on an exception.
-
- This currently only handles the case where str() is used in
- the scope of an exception handler. If the exception is passed
- into a function, returned from an assertRaises, or used on an
- exception created in the same scope, this does not catch it.
+class CheckForStrUnicodeExc(BaseASTChecker):
+ """Checks for the use of str() or unicode() on an exception.
+
+ This currently only handles the case where str() or unicode()
+ is used in the scope of an exception handler. If the exception
+ is passed into a function, returned from an assertRaises, or
+ used on an exception created in the same scope, this does not
+ catch it.
"""
- CHECK_DESC = ('N325 str() cannot be used on an exception. '
- 'Remove or use six.text_type()')
+ CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
+ 'exception. Remove or use six.text_type()')
def __init__(self, tree, filename):
- super(CheckForStrExc, self).__init__(tree, filename)
+ super(CheckForStrUnicodeExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
@@ -379,19 +380,19 @@ class CheckForStrExc(BaseASTChecker):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
- super(CheckForStrExc, self).generic_visit(node)
+ super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
- super(CheckForStrExc, self).generic_visit(node)
+ super(CheckForStrUnicodeExc, self).generic_visit(node)
def visit_Call(self, node):
- if self._check_call_names(node, ['str']):
+ if self._check_call_names(node, ['str', 'unicode']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
- super(CheckForStrExc, self).generic_visit(node)
+ super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
@@ -435,5 +436,5 @@ def factory(register):
register(check_explicit_underscore_import)
register(use_jsonutils)
register(check_assert_called_once)
- register(CheckForStrExc)
+ register(CheckForStrUnicodeExc)
register(CheckForTransAdd)
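For context, a short example of the code the renamed N325 check is meant to catch (Python 2 era syntax, matching the tree at the time; the surrounding function is made up):

    import six

    def do_something():
        raise ValueError("boom")

    try:
        do_something()
    except ValueError as e:
        bad = unicode(e)         # N325: unicode() on a caught exception
        also_bad = str(e)        # N325: str() was already flagged
        good = six.text_type(e)  # the suggested replacement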
diff --git a/nova/image/glance.py b/nova/image/glance.py
index d40943fd24..d58128c9d6 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -612,18 +612,18 @@ def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.BadRequest):
- return exception.Invalid(unicode(exc_value))
+ return exception.Invalid(six.text_type(exc_value))
return exc_value
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
- return exception.Forbidden(unicode(exc_value))
+ return exception.Forbidden(six.text_type(exc_value))
if isinstance(exc_value, glanceclient.exc.NotFound):
- return exception.NotFound(unicode(exc_value))
+ return exception.NotFound(six.text_type(exc_value))
if isinstance(exc_value, glanceclient.exc.BadRequest):
- return exception.Invalid(unicode(exc_value))
+ return exception.Invalid(six.text_type(exc_value))
return exc_value
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 224681bb1a..10d65ad43d 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -21,6 +21,7 @@ import uuid
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
from oslo.utils import excutils
+import six
from nova.api.openstack import extensions
from nova.compute import flavors
@@ -35,9 +36,9 @@ from nova import objects
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
-from nova.pci import pci_manager
-from nova.pci import pci_request
-from nova.pci import pci_whitelist
+from nova.pci import manager as pci_manager
+from nova.pci import request as pci_request
+from nova.pci import whitelist as pci_whitelist
neutron_opts = [
cfg.StrOpt('url',
@@ -1151,9 +1152,9 @@ class API(base_api.NetworkAPI):
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
- raise exception.NoMoreFloatingIps(unicode(e))
+ raise exception.NoMoreFloatingIps(six.text_type(e))
except neutron_client_exc.OverQuotaClient as e:
- raise exception.FloatingIpLimitExceeded(unicode(e))
+ raise exception.FloatingIpLimitExceeded(six.text_type(e))
return fip['floatingip']['floating_ip_address']
diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py
index f4b8afbe69..f446911f64 100644
--- a/nova/network/security_group/neutron_driver.py
+++ b/nova/network/security_group/neutron_driver.py
@@ -62,7 +62,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
# quota
raise exc.HTTPBadRequest()
elif e.status_code == 409:
- self.raise_over_quota(unicode(e))
+ self.raise_over_quota(six.text_type(e))
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
@@ -136,7 +136,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug("Neutron security group %s not found", name)
- self.raise_not_found(unicode(e))
+ self.raise_not_found(six.text_type(e))
else:
LOG.error(_("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
@@ -181,9 +181,9 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
- self.raise_not_found(unicode(e))
+ self.raise_not_found(six.text_type(e))
elif e.status_code == 409:
- self.raise_invalid_property(unicode(e))
+ self.raise_invalid_property(six.text_type(e))
else:
LOG.error(_("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
@@ -207,11 +207,11 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
if e.status_code == 404:
LOG.exception(_("Neutron Error getting security group %s"),
name)
- self.raise_not_found(unicode(e))
+ self.raise_not_found(six.text_type(e))
elif e.status_code == 409:
LOG.exception(_("Neutron Error adding rules to security "
"group %s"), name)
- self.raise_over_quota(unicode(e))
+ self.raise_over_quota(six.text_type(e))
else:
LOG.exception(_("Neutron Error:"))
raise exc_info[0], exc_info[1], exc_info[2]
@@ -278,7 +278,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug("Neutron security group rule %s not found", id)
- self.raise_not_found(unicode(e))
+ self.raise_not_found(six.text_type(e))
else:
LOG.error(_("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
diff --git a/nova/openstack/common/middleware/__init__.py b/nova/openstack/common/middleware/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/openstack/common/middleware/__init__.py
diff --git a/nova/openstack/common/middleware/base.py b/nova/openstack/common/middleware/base.py
new file mode 100644
index 0000000000..464a1ccd72
--- /dev/null
+++ b/nova/openstack/common/middleware/base.py
@@ -0,0 +1,56 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Base class(es) for WSGI Middleware."""
+
+import webob.dec
+
+
+class Middleware(object):
+ """Base WSGI middleware wrapper.
+
+ These classes require an application to be initialized that will be called
+ next. By default the middleware will simply call its wrapped app, or you
+ can override __call__ to customize its behavior.
+ """
+
+ @classmethod
+ def factory(cls, global_conf, **local_conf):
+ """Factory method for paste.deploy."""
+ return cls
+
+ def __init__(self, application):
+ self.application = application
+
+ def process_request(self, req):
+ """Called on each request.
+
+ If this returns None, the next application down the stack will be
+ executed. If it returns a response then that response will be returned
+ and execution will stop here.
+ """
+ return None
+
+ def process_response(self, response):
+ """Do whatever you'd like to the response."""
+ return response
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ response = self.process_request(req)
+ if response:
+ return response
+ response = req.get_response(self.application)
+ return self.process_response(response)
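The base class gives subclasses two hooks besides __call__. A sketch of a hypothetical subclass (the class name and header below are invented) showing how process_request can short-circuit the pipeline and process_response can annotate the result:

    import webob

    from nova.openstack.common.middleware import base


    class HeaderStamp(base.Middleware):
        """Hypothetical middleware built on the new base class."""

        def process_request(self, req):
            # Returning a response here stops the chain before the
            # wrapped application is called.
            if req.path == '/ping':
                return webob.Response(body='pong')
            return None

        def process_response(self, response):
            # Otherwise the wrapped app's response passes through here.
            response.headers['X-Stamped'] = 'yes'
            return response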
diff --git a/nova/openstack/common/middleware/request_id.py b/nova/openstack/common/middleware/request_id.py
new file mode 100644
index 0000000000..03cf59416e
--- /dev/null
+++ b/nova/openstack/common/middleware/request_id.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Middleware that ensures request ID.
+
+It assigns a request ID to each API request and stores it in the
+request environment. The request ID is also added to the API response.
+"""
+
+import webob.dec
+
+from nova.openstack.common import context
+from nova.openstack.common.middleware import base
+from nova.openstack.common import versionutils
+
+
+ENV_REQUEST_ID = 'openstack.request_id'
+HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'
+
+
+@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
+ in_favor_of='oslo.middleware.RequestId')
+class RequestIdMiddleware(base.Middleware):
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ req_id = context.generate_request_id()
+ req.environ[ENV_REQUEST_ID] = req_id
+ response = req.get_response(self.application)
+ if HTTP_RESP_HEADER_REQUEST_ID not in response.headers:
+ response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, req_id)
+ return response
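A sketch of the middleware in use against a trivial stand-in application (the app below is made up; real deployments wrap the API router via the paste pipeline):

    import webob
    import webob.dec

    from nova.openstack.common.middleware import request_id


    @webob.dec.wsgify
    def trivial_app(req):
        return webob.Response(body='ok')


    app = request_id.RequestIdMiddleware(trivial_app)
    resp = webob.Request.blank('/servers').get_response(app)
    # Every response now carries the generated id in both the WSGI
    # environment and this header:
    print(resp.headers['x-openstack-request-id'])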
diff --git a/nova/pci/pci_device.py b/nova/pci/device.py
index 176e08a11a..176e08a11a 100644
--- a/nova/pci/pci_device.py
+++ b/nova/pci/device.py
diff --git a/nova/pci/pci_devspec.py b/nova/pci/devspec.py
index e78251bedd..caa4235320 100755..100644
--- a/nova/pci/pci_devspec.py
+++ b/nova/pci/devspec.py
@@ -17,7 +17,7 @@ import re
from oslo.serialization import jsonutils
from nova import exception
-from nova.pci import pci_utils
+from nova.pci import utils
MAX_VENDOR_ID = 0xFFFF
MAX_PRODUCT_ID = 0xFFFF
@@ -53,7 +53,7 @@ class PciAddress(object):
| pci_passthrough_whitelist = {"address":"*:0a:00.*",
| "physical_network":"physnet1"}
- | pci_passthrough_whitelist = {"address":":0a:00.",
+ | pci_passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
This function class will validate the address fields, check for wildcards,
and insert wildcards where the field is left blank.
@@ -69,12 +69,12 @@ class PciAddress(object):
def _check_physical_function(self):
if ANY in (self.domain, self.bus, self.slot, self.func):
return
- self.is_physical_function = pci_utils.is_physical_function(self)
+ self.is_physical_function = utils.is_physical_function(self)
def _init_address_fields(self, pci_addr):
if self.is_physical_function:
(self.domain, self.bus, self.slot,
- self.func) = pci_utils.get_pci_address_fields(pci_addr)
+ self.func) = utils.get_pci_address_fields(pci_addr)
return
dbs, sep, func = pci_addr.partition('.')
if func:
@@ -112,12 +112,12 @@ class PciAddress(object):
if not pci_phys_addr:
return False
domain, bus, slot, func = (
- pci_utils.get_pci_address_fields(pci_phys_addr))
+ utils.get_pci_address_fields(pci_phys_addr))
return (self.domain == domain and self.bus == bus and
self.slot == slot and self.func == func)
else:
domain, bus, slot, func = (
- pci_utils.get_pci_address_fields(pci_addr))
+ utils.get_pci_address_fields(pci_addr))
conditions = [
self.domain in (ANY, domain),
self.bus in (ANY, bus),
@@ -149,7 +149,7 @@ class PciDeviceSpec(object):
raise exception.PciDeviceInvalidDeviceName()
if not self.address:
if self.dev_name:
- self.address, pf = pci_utils.get_function_by_ifname(
+ self.address, pf = utils.get_function_by_ifname(
self.dev_name)
if not self.address:
raise exception.PciDeviceNotFoundById(id=self.dev_name)
diff --git a/nova/pci/pci_manager.py b/nova/pci/manager.py
index 90f1e4f9d9..5007baff7b 100644
--- a/nova/pci/pci_manager.py
+++ b/nova/pci/manager.py
@@ -23,8 +23,8 @@ from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
-from nova.pci import pci_device
-from nova.pci import pci_stats
+from nova.pci import device
+from nova.pci import stats
LOG = logging.getLogger(__name__)
@@ -52,7 +52,7 @@ class PciDevTracker(object):
super(PciDevTracker, self).__init__()
self.stale = {}
self.node_id = node_id
- self.stats = pci_stats.PciDeviceStats()
+ self.stats = stats.PciDeviceStats()
if node_id:
self.pci_devs = list(
objects.PciDeviceList.get_by_compute_node(context, node_id))
@@ -107,7 +107,7 @@ class PciDevTracker(object):
for existed in self.pci_devs:
if existed['address'] in exist_addrs - new_addrs:
try:
- pci_device.remove(existed)
+ device.remove(existed)
except exception.PciDeviceInvalidStatus as e:
LOG.warn(_("Trying to remove device with %(status)s "
"ownership %(instance_uuid)s because of "
@@ -140,7 +140,7 @@ class PciDevTracker(object):
# by force in future.
self.stale[new_value['address']] = new_value
else:
- pci_device.update_device(existed, new_value)
+ device.update_device(existed, new_value)
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
@@ -158,18 +158,18 @@ class PciDevTracker(object):
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
- pci_device.claim(dev, instance)
+ device.claim(dev, instance)
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
- pci_device.allocate(dev, instance)
+ device.allocate(dev, instance)
def _free_device(self, dev, instance=None):
- pci_device.free(dev, instance)
+ device.free(dev, instance)
stale = self.stale.pop(dev['address'], None)
if stale:
- pci_device.update_device(dev, stale)
+ device.update_device(dev, stale)
self.stats.add_device(dev)
def _free_instance(self, instance):
diff --git a/nova/pci/pci_request.py b/nova/pci/request.py
index 3aae710738..cb0ae049bc 100644
--- a/nova/pci/pci_request.py
+++ b/nova/pci/request.py
@@ -46,7 +46,7 @@ import six
from nova import exception
from nova import objects
from nova.openstack.common import log as logging
-from nova.pci import pci_utils
+from nova.pci import utils
pci_alias_opts = [
cfg.MultiStrOpt('pci_alias',
@@ -91,11 +91,11 @@ _ALIAS_SCHEMA = {
},
"product_id": {
"type": "string",
- "pattern": pci_utils.PCI_VENDOR_PATTERN,
+ "pattern": utils.PCI_VENDOR_PATTERN,
},
"vendor_id": {
"type": "string",
- "pattern": pci_utils.PCI_VENDOR_PATTERN,
+ "pattern": utils.PCI_VENDOR_PATTERN,
},
"device_type": {
"type": "string",
diff --git a/nova/pci/pci_stats.py b/nova/pci/stats.py
index 7dd6c09c0f..d49d723dc9 100644
--- a/nova/pci/pci_stats.py
+++ b/nova/pci/stats.py
@@ -21,8 +21,8 @@ from oslo.serialization import jsonutils
from nova import exception
from nova.i18n import _LE
from nova.openstack.common import log as logging
-from nova.pci import pci_utils
-from nova.pci import pci_whitelist
+from nova.pci import utils
+from nova.pci import whitelist
LOG = logging.getLogger(__name__)
@@ -81,7 +81,7 @@ class PciDeviceStats(object):
"""
# Don't add a device that doesn't have a matching device spec.
# This can happen during initial sync up with the controller
- devspec = pci_whitelist.get_pci_device_devspec(dev)
+ devspec = whitelist.get_pci_device_devspec(dev)
if not devspec:
return
tags = devspec.get_tags()
@@ -174,7 +174,7 @@ class PciDeviceStats(object):
@staticmethod
def _filter_pools_for_spec(pools, request_specs):
return [pool for pool in pools
- if pci_utils.pci_device_prop_match(pool, request_specs)]
+ if utils.pci_device_prop_match(pool, request_specs)]
def _apply_request(self, pools, request):
count = request.count
diff --git a/nova/pci/pci_utils.py b/nova/pci/utils.py
index fbdec9effc..fbdec9effc 100644
--- a/nova/pci/pci_utils.py
+++ b/nova/pci/utils.py
diff --git a/nova/pci/pci_whitelist.py b/nova/pci/whitelist.py
index 75e630de52..5113bac0ca 100644
--- a/nova/pci/pci_whitelist.py
+++ b/nova/pci/whitelist.py
@@ -17,7 +17,7 @@
from oslo.config import cfg
from nova.openstack.common import log as logging
-from nova.pci import pci_devspec
+from nova.pci import devspec
pci_opts = [cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
@@ -46,7 +46,7 @@ class PciHostDevicesWhiteList(object):
"""Parse and validate the pci whitelist from the nova config."""
specs = []
for jsonspec in whitelists:
- spec = pci_devspec.PciDeviceSpec(jsonspec)
+ spec = devspec.PciDeviceSpec(jsonspec)
specs.append(spec)
return specs
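The remaining pci changes in this commit are mechanical fallout of dropping the redundant pci_ prefix from the module names; callers either switch to the bare name or alias it on import, for example:

    # Old imports such as these:
    #     from nova.pci import pci_manager
    #     from nova.pci import pci_stats
    # become aliased imports, keeping the old short name in callers:
    from nova.pci import manager as pci_manager
    from nova.pci import stats as pci_stats
    from nova.pci import whitelist as pci_whitelist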
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 0cfd49a2ef..a259cb31bd 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -148,7 +148,16 @@ class FilterScheduler(driver.Scheduler):
# Couldn't fulfill the request_spec
if len(selected_hosts) < num_instances:
- raise exception.NoValidHost(reason='')
+ # Log the details but don't put those into the reason since
+ # we don't want to give away too much information about our
+ # actual environment.
+ LOG.debug('There are %(hosts)d hosts available but '
+ '%(num_instances)d instances requested to build.',
+ {'hosts': len(selected_hosts),
+ 'num_instances': num_instances})
+
+ reason = _('There are not enough hosts available.')
+ raise exception.NoValidHost(reason=reason)
dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
limits=host.obj.limits) for host in selected_hosts]
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index e6f2c2e98b..13a6bcaab4 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -30,7 +30,7 @@ from nova import db
from nova import exception
from nova.i18n import _, _LW
from nova.openstack.common import log as logging
-from nova.pci import pci_stats
+from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
from nova.virt import hardware
diff --git a/nova/service.py b/nova/service.py
index 5b2003a2a3..c2b986c019 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -221,7 +221,7 @@ class Service(service.Service):
'host': self.host,
'binary': self.binary,
'topic': self.topic,
- 'report_count': 0
+ 'report_count': 0,
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
diff --git a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
index 62a61afd69..9650f9560a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
@@ -14,17 +14,16 @@
# under the License.
import mock
-from oslo.config import cfg
from webob import exc
-from nova.api.openstack.compute.contrib import baremetal_nodes
+from nova.api.openstack.compute.contrib import baremetal_nodes as b_nodes_v2
+from nova.api.openstack.compute.plugins.v3 import baremetal_nodes \
+ as b_nodes_v21
from nova.api.openstack import extensions
from nova import context
from nova import test
from nova.tests.virt.ironic import utils as ironic_utils
-CONF = cfg.CONF
-
class FakeRequest(object):
@@ -64,18 +63,19 @@ def fake_node_ext_status(**updates):
FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
-@mock.patch.object(baremetal_nodes, '_get_ironic_client',
+@mock.patch.object(b_nodes_v21, '_get_ironic_client',
lambda *_: FAKE_IRONIC_CLIENT)
-class BareMetalNodesTest(test.NoDBTestCase):
-
+class BareMetalNodesTestV21(test.NoDBTestCase):
def setUp(self):
- super(BareMetalNodesTest, self).setUp()
+ super(BareMetalNodesTestV21, self).setUp()
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self._setup()
self.context = context.get_admin_context()
- self.controller = baremetal_nodes.BareMetalNodeController(self.ext_mgr)
self.request = FakeRequest(self.context)
+ def _setup(self):
+ self.controller = b_nodes_v21.BareMetalNodeController()
+
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
def test_index_ironic(self, mock_list):
properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
@@ -149,3 +149,11 @@ class BareMetalNodesTest(test.NoDBTestCase):
self.assertRaises(exc.HTTPBadRequest,
self.controller._remove_interface,
self.request, 'fake-id', 'fake-body')
+
+
+@mock.patch.object(b_nodes_v2, '_get_ironic_client',
+ lambda *_: FAKE_IRONIC_CLIENT)
+class BareMetalNodesTestV2(BareMetalNodesTestV21):
+ def _setup(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
diff --git a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py b/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py
index 0ce73301d4..2c988d501c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py
+++ b/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py
@@ -161,6 +161,12 @@ class BlockDeviceMappingTestV21(test.TestCase):
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params, no_image=True)
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_bdm_param_not_list(self, mock_create):
+ self.params = {'block_device_mapping': '/dev/vdb'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
diff --git a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py b/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py
index 1cab2d8b65..2e680b9dad 100644
--- a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py
+++ b/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py
@@ -180,6 +180,12 @@ class BlockDeviceMappingTestV21(test.TestCase):
self.assertRaises(exc.HTTPBadRequest,
self._test_create, self.params)
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_bdm_param_not_list(self, mock_create):
+ self.params = {'block_device_mapping': '/dev/vdb'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
params = {'block_device_mapping': self.bdm}
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
index b17722902d..5caccf14b5 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
@@ -14,8 +14,10 @@
import webob
-from nova.api.openstack.compute.contrib import cloudpipe_update
+from nova.api.openstack.compute.contrib import cloudpipe_update as clup_v2
+from nova.api.openstack.compute.plugins.v3 import cloudpipe as clup_v21
from nova import db
+from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
@@ -36,13 +38,20 @@ def fake_network_update(context, network_id, values):
network[key] = values[key]
-class CloudpipeUpdateTest(test.NoDBTestCase):
+class CloudpipeUpdateTestV21(test.NoDBTestCase):
+ bad_request = exception.ValidationError
def setUp(self):
- super(CloudpipeUpdateTest, self).setUp()
- self.controller = cloudpipe_update.CloudpipeUpdateController()
+ super(CloudpipeUpdateTestV21, self).setUp()
self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
self.stubs.Set(db, "network_update", fake_network_update)
+ self._setup()
+
+ def _setup(self):
+ self.controller = clup_v21.CloudpipeController()
+
+ def _check_status(self, expected_status, res, controller_method):
+ self.assertEqual(expected_status, controller_method.wsgi_code)
def test_cloudpipe_configure_project(self):
req = fakes.HTTPRequest.blank(
@@ -50,7 +59,7 @@ class CloudpipeUpdateTest(test.NoDBTestCase):
body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
result = self.controller.update(req, 'configure-project',
body=body)
- self.assertEqual('202 Accepted', result.status)
+ self._check_status(202, result, self.controller.update)
self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
@@ -60,21 +69,31 @@ class CloudpipeUpdateTest(test.NoDBTestCase):
body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
- 'configure-projectx', body)
+ 'configure-projectx', body=body)
def test_cloudpipe_configure_project_bad_data(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}}
- self.assertRaises(webob.exc.HTTPBadRequest,
+ self.assertRaises(self.bad_request,
self.controller.update, req,
- 'configure-project', body)
+ 'configure-project', body=body)
def test_cloudpipe_configure_project_bad_vpn_port(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
"vpn_port": "foo"}}
- self.assertRaises(webob.exc.HTTPBadRequest,
+ self.assertRaises(self.bad_request,
self.controller.update, req,
- 'configure-project', body)
+ 'configure-project', body=body)
+
+
+class CloudpipeUpdateTestV2(CloudpipeUpdateTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+
+ def _setup(self):
+ self.controller = clup_v2.CloudpipeUpdateController()
+
+ def _check_status(self, expected_status, res, controller_method):
+ self.assertEqual(expected_status, res.status_int)
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index efa3ea4136..b388ae1b5d 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -27,11 +27,9 @@ import webob
from nova.api.openstack.compute.contrib import networks_associate
from nova.api.openstack.compute.contrib import os_networks as networks
-from nova.api.openstack.compute.contrib import os_tenant_networks as tnet
from nova.api.openstack.compute.plugins.v3 import networks as networks_v21
from nova.api.openstack.compute.plugins.v3 import networks_associate as \
networks_associate_v21
-from nova.api.openstack.compute.plugins.v3 import tenant_networks as tnet_v21
from nova.api.openstack import extensions
import nova.context
from nova import exception
@@ -610,48 +608,3 @@ class NetworksAssociateTestV2(NetworksAssociateTestV21):
def _check_status(self, res, method, code):
self.assertEqual(res.status_int, 202)
-
-
-class TenantNetworksTestV21(test.NoDBTestCase):
- ctrlr = tnet_v21.TenantNetworkController
-
- def setUp(self):
- super(TenantNetworksTestV21, self).setUp()
- self.controller = self.ctrlr()
- self.flags(enable_network_quota=True)
-
- @mock.patch('nova.quota.QUOTAS.reserve')
- @mock.patch('nova.quota.QUOTAS.rollback')
- @mock.patch('nova.network.api.API.delete')
- def _test_network_delete_exception(self, ex, expex, delete_mock,
- rollback_mock, reserve_mock):
- req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks')
- ctxt = req.environ['nova.context']
-
- reserve_mock.return_value = 'rv'
- delete_mock.side_effect = ex
-
- self.assertRaises(expex, self.controller.delete, req, 1)
-
- delete_mock.assert_called_once_with(ctxt, 1)
- rollback_mock.assert_called_once_with(ctxt, 'rv')
- reserve_mock.assert_called_once_with(ctxt, networks=-1)
-
- def test_network_delete_exception_network_not_found(self):
- ex = exception.NetworkNotFound(network_id=1)
- expex = webob.exc.HTTPNotFound
- self._test_network_delete_exception(ex, expex)
-
- def test_network_delete_exception_policy_failed(self):
- ex = exception.PolicyNotAuthorized(action='dummy')
- expex = webob.exc.HTTPForbidden
- self._test_network_delete_exception(ex, expex)
-
- def test_network_delete_exception_network_in_use(self):
- ex = exception.NetworkInUse(network_id=1)
- expex = webob.exc.HTTPConflict
- self._test_network_delete_exception(ex, expex)
-
-
-class TenantNetworksTestV2(TenantNetworksTestV21):
- ctrlr = tnet.NetworkController
diff --git a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
index 1116f893d1..95968f13a2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
@@ -51,8 +51,8 @@ class TestNeutronSecurityGroupsTestCase(test.TestCase):
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
-class TestNeutronSecurityGroups(
- test_security_groups.TestSecurityGroups,
+class TestNeutronSecurityGroupsV21(
+ test_security_groups.TestSecurityGroupsV21,
TestNeutronSecurityGroupsTestCase):
def _create_sg_template(self, **kwargs):
@@ -400,6 +400,12 @@ class TestNeutronSecurityGroups(
device_id=test_security_groups.FAKE_UUID1)
+class TestNeutronSecurityGroupsV2(TestNeutronSecurityGroupsV21):
+ controller_cls = security_groups.SecurityGroupController
+ server_secgrp_ctl_cls = security_groups.ServerSecurityGroupController
+ secgrp_act_ctl_cls = security_groups.SecurityGroupActionController
+
+
class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
def setUp(self):
super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 40a1e47c14..341d2bb0ff 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -21,6 +21,8 @@ from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
+from nova.api.openstack.compute.plugins.v3 import security_groups as \
+ secgroups_v21
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
@@ -121,17 +123,17 @@ def return_server_nonexistent(context, server_id, columns_to_join=None):
raise exception.InstanceNotFound(instance_id=server_id)
-# NOTE(oomichi): v2.1 API does not support security group management (create/
-# update/delete a security group). We don't need to test this class against
-# v2.1 API.
-class TestSecurityGroups(test.TestCase):
+class TestSecurityGroupsV21(test.TestCase):
+ secgrp_ctl_cls = secgroups_v21.SecurityGroupController
+ server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
+ secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
+
def setUp(self):
- super(TestSecurityGroups, self).setUp()
+ super(TestSecurityGroupsV21, self).setUp()
- self.controller = secgroups_v2.SecurityGroupController()
- self.server_controller = (
- secgroups_v2.ServerSecurityGroupController())
- self.manager = secgroups_v2.SecurityGroupActionController()
+ self.controller = self.secgrp_ctl_cls()
+ self.server_controller = self.server_secgrp_ctl_cls()
+ self.manager = self.secgrp_act_ctl_cls()
# This needs to be done here to set fake_id because the derived
# class needs to be called first if it wants to set
@@ -807,6 +809,12 @@ class TestSecurityGroups(test.TestCase):
self.manager._removeSecurityGroup(req, '1', body)
+class TestSecurityGroupsV2(TestSecurityGroupsV21):
+ controller_cls = secgroups_v2.SecurityGroupController
+ server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
+ secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
+
+
# NOTE(oomichi): v2.1 API does not support security group management (create/
# update/delete a security group). We don't need to test this class against
# v2.1 API.
diff --git a/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py b/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py
index 66c5cae7ee..18133e8604 100644
--- a/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py
@@ -23,12 +23,13 @@ from nova import test
from nova.tests.api.openstack import fakes
-class NetworksTestV21(test.NoDBTestCase):
- ctrl_class = networks_v21.TenantNetworkController
+class TenantNetworksTestV21(test.NoDBTestCase):
+ ctrlr = networks_v21.TenantNetworkController
def setUp(self):
- super(NetworksTestV21, self).setUp()
- self.controller = self.ctrl_class()
+ super(TenantNetworksTestV21, self).setUp()
+ self.controller = self.ctrlr()
+ self.flags(enable_network_quota=True)
@mock.patch('nova.network.api.API.delete',
side_effect=exception.NetworkInUse(network_id=1))
@@ -38,6 +39,38 @@ class NetworksTestV21(test.NoDBTestCase):
self.assertRaises(webob.exc.HTTPConflict,
self.controller.delete, req, 1)
+ @mock.patch('nova.quota.QUOTAS.reserve')
+ @mock.patch('nova.quota.QUOTAS.rollback')
+ @mock.patch('nova.network.api.API.delete')
+ def _test_network_delete_exception(self, ex, expex, delete_mock,
+ rollback_mock, reserve_mock):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks')
+ ctxt = req.environ['nova.context']
-class NetworksTestV2(NetworksTestV21):
- ctrl_class = networks.NetworkController
+ reserve_mock.return_value = 'rv'
+ delete_mock.side_effect = ex
+
+ self.assertRaises(expex, self.controller.delete, req, 1)
+
+ delete_mock.assert_called_once_with(ctxt, 1)
+ rollback_mock.assert_called_once_with(ctxt, 'rv')
+ reserve_mock.assert_called_once_with(ctxt, networks=-1)
+
+ def test_network_delete_exception_network_not_found(self):
+ ex = exception.NetworkNotFound(network_id=1)
+ expex = webob.exc.HTTPNotFound
+ self._test_network_delete_exception(ex, expex)
+
+ def test_network_delete_exception_policy_failed(self):
+ ex = exception.PolicyNotAuthorized(action='dummy')
+ expex = webob.exc.HTTPForbidden
+ self._test_network_delete_exception(ex, expex)
+
+ def test_network_delete_exception_network_in_use(self):
+ ex = exception.NetworkInUse(network_id=1)
+ expex = webob.exc.HTTPConflict
+ self._test_network_delete_exception(ex, expex)
+
+
+class TenantNetworksTestV2(TenantNetworksTestV21):
+ ctrlr = networks.NetworkController
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_pci.py b/nova/tests/api/openstack/compute/plugins/v3/test_pci.py
index a4d508c746..5bc2201c49 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_pci.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_pci.py
@@ -22,7 +22,7 @@ from nova import context
from nova import db
from nova import exception
from nova import objects
-from nova.pci import pci_device
+from nova.pci import device
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_pci_device
@@ -60,8 +60,8 @@ class PciServerControllerTest(test.NoDBTestCase):
}]}
self._create_fake_instance()
self._create_fake_pci_device()
- pci_device.claim(self.pci_device, self.inst)
- pci_device.allocate(self.pci_device, self.inst)
+ device.claim(self.pci_device, self.inst)
+ device.allocate(self.pci_device, self.inst)
def _create_fake_instance(self):
self.inst = objects.Instance()
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 9878a3adef..37ead3e73d 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -21,6 +21,7 @@ import xml.dom.minidom as minidom
from lxml import etree
import mock
+import six
from testtools import matchers
import webob
import webob.exc
@@ -380,7 +381,7 @@ class MiscFunctionsTest(test.TestCase):
common.raise_http_conflict_for_instance_invalid_state(exc,
'meow', 'fake_server_id')
except webob.exc.HTTPConflict as e:
- self.assertEqual(unicode(e),
+ self.assertEqual(six.text_type(e),
"Cannot 'meow' instance fake_server_id while it is in "
"fake_attr fake_state")
else:
diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py
index e11c611b3a..73bc240482 100644
--- a/nova/tests/api/test_auth.py
+++ b/nova/tests/api/test_auth.py
@@ -13,13 +13,13 @@
# under the License.
from oslo.config import cfg
-from oslo.middleware import request_id
from oslo.serialization import jsonutils
import webob
import webob.exc
import nova.api.auth
from nova.i18n import _
+from nova.openstack.common.middleware import request_id
from nova import test
CONF = cfg.CONF
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
index 1e29e16a87..24590e818e 100644
--- a/nova/tests/compute/test_claims.py
+++ b/nova/tests/compute/test_claims.py
@@ -26,9 +26,9 @@ from nova.compute import claims
from nova import db
from nova import exception
from nova import objects
-from nova.pci import pci_manager
+from nova.pci import manager as pci_manager
from nova import test
-from nova.tests.pci import pci_fakes
+from nova.tests.pci import fakes as pci_fakes
from nova.virt import hardware
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index f7c71f9a8a..600b344940 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -6270,7 +6270,10 @@ class ComputeTestCase(BaseTestCase):
task_state='deleting'),
fake_instance.fake_db_instance(uuid='fake_uuid7',
vm_state=vm_states.RESIZED,
- task_state='soft-deleting')]
+ task_state='soft-deleting'),
+ fake_instance.fake_db_instance(uuid='fake_uuid8',
+ vm_state=vm_states.ACTIVE,
+ task_state='resize_finish')]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
@@ -6278,7 +6281,8 @@ class ComputeTestCase(BaseTestCase):
'fake_uuid4': None,
'fake_uuid5': 'error',
'fake_uuid6': None,
- 'fake_uuid7': None}
+ 'fake_uuid7': None,
+ 'fake_uuid8': None}
migrations = []
for i, instance in enumerate(instances, start=1):
fake_mig = test_migration.fake_db_migration()
diff --git a/nova/tests/compute/test_hvtype.py b/nova/tests/compute/test_hvtype.py
index f840b9e607..93cb245e10 100644
--- a/nova/tests/compute/test_hvtype.py
+++ b/nova/tests/compute/test_hvtype.py
@@ -37,6 +37,9 @@ class HvTypeTest(test.NoDBTestCase):
def test_canonicalize_xapi(self):
self.assertEqual(hvtype.XEN, hvtype.canonicalize("xapi"))
+ def test_canonicalize_powervm(self):
+ self.assertEqual(hvtype.PHYP, hvtype.canonicalize("POWERVM"))
+
def test_canonicalize_invalid(self):
self.assertRaises(exception.InvalidHypervisorVirtType,
hvtype.canonicalize,
diff --git a/nova/tests/compute/test_keypairs.py b/nova/tests/compute/test_keypairs.py
index 8c923beb29..2644250cc2 100644
--- a/nova/tests/compute/test_keypairs.py
+++ b/nova/tests/compute/test_keypairs.py
@@ -15,6 +15,7 @@
"""Tests for keypair API."""
from oslo.config import cfg
+import six
from nova.compute import api as compute_api
from nova import context
@@ -117,7 +118,7 @@ class CreateImportSharedTestMixIn(object):
exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
name, *args)
- self.assertEqual(expected_message, unicode(exc))
+ self.assertEqual(expected_message, six.text_type(exc))
def assertInvalidKeypair(self, expected_message, name):
msg = _('Keypair data is invalid: %s') % expected_message
@@ -188,7 +189,7 @@ class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
self.ctxt, self.ctxt.user_id, 'foo',
'bad key data')
msg = u'Keypair data is invalid: failed to generate fingerprint'
- self.assertEqual(msg, unicode(exc))
+ self.assertEqual(msg, six.text_type(exc))
class GetKeypairTestCase(KeypairAPITestCase):
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index e8bc47bcff..0d3b97ff22 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -35,7 +35,7 @@ from nova import rpc
from nova import test
from nova.tests.compute.monitors import test_monitors
from nova.tests.objects import test_migration
-from nova.tests.pci import pci_fakes
+from nova.tests.pci import fakes as pci_fakes
from nova.virt import driver
from nova.virt import hardware
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 271eea0a3f..19951f250a 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -19,6 +19,7 @@ import contextlib
import mock
import mox
+from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import timeutils
@@ -59,6 +60,10 @@ from nova.tests import fake_utils
from nova import utils
+CONF = cfg.CONF
+CONF.import_opt('report_interval', 'nova.service')
+
+
FAKE_IMAGE_REF = 'fake-image-ref'
@@ -863,6 +868,30 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
+ @mock.patch.object(db, 'service_update')
+ @mock.patch('oslo.messaging.RPCClient.prepare')
+ def test_service_update_time_big(self, mock_prepare, mock_update):
+ CONF.set_override('report_interval', 10)
+ services = {'id': 1}
+ self.conductor.service_update(self.context, services, {})
+ mock_prepare.assert_called_once_with(timeout=9)
+
+ @mock.patch.object(db, 'service_update')
+ @mock.patch('oslo.messaging.RPCClient.prepare')
+ def test_service_update_time_small(self, mock_prepare, mock_update):
+ CONF.set_override('report_interval', 3)
+ services = {'id': 1}
+ self.conductor.service_update(self.context, services, {})
+ mock_prepare.assert_called_once_with(timeout=3)
+
+ @mock.patch.object(db, 'service_update')
+ @mock.patch('oslo.messaging.RPCClient.prepare')
+ def test_service_update_no_time(self, mock_prepare, mock_update):
+ CONF.set_override('report_interval', None)
+ services = {'id': 1}
+ self.conductor.service_update(self.context, services, {})
+ mock_prepare.assert_called_once_with()
+
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/consoleauth/test_rpcapi.py
index 104b43bdab..eb76acbf34 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/consoleauth/test_rpcapi.py
@@ -78,7 +78,7 @@ class ConsoleAuthRpcAPITestCase(test.NoDBTestCase):
self.flags(consoleauth='havana', group='upgrade_levels')
self._test_consoleauth_api('check_token', token='t', version='1.0')
- def test_delete_tokens_for_instnace(self):
+ def test_delete_tokens_for_instance(self):
self._test_consoleauth_api('delete_tokens_for_instance',
_do_cast=True,
instance_uuid="instance")
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index 02d8b934da..bd8dc724e5 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -6554,7 +6554,7 @@ class Ec2TestCase(test.TestCase):
try:
method(self.ctxt, value)
except exception.NotFound as exc:
- self.assertIn(unicode(value), unicode(exc))
+ self.assertIn(six.text_type(value), six.text_type(exc))
check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index 3ee3ceb82a..bfb678c6db 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -28,7 +28,7 @@ from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.objects import virtual_interface as vif_obj
-from nova.pci import pci_device
+from nova.pci import device as pci_device
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_instance_info_cache
from nova.tests.objects import test_pci_device
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index c0f7a44e33..bb1ad52231 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -138,6 +138,7 @@ policy_data = """
"compute_extension:attach_interfaces": "",
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:baremetal_nodes": "",
+ "compute_extension:v3:os-baremetal-nodes": "",
"compute_extension:cells": "",
"compute_extension:cells:create": "rule:admin_api",
"compute_extension:cells:delete": "rule:admin_api",
diff --git a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
new file mode 100644
index 0000000000..0ab9141aea
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "configure_project": {
+ "vpn_ip": "%(vpn_ip)s",
+ "vpn_port": "%(vpn_port)s"
+ }
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
new file mode 100644
index 0000000000..19a6ed2cb8
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addSecurityGroup": {
+ "name": "%(group_name)s"
+ }
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
new file mode 100644
index 0000000000..3f54ab6856
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "security_group": {
+ "name": "%(group_name)s",
+ "description": "description"
+ }
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
new file mode 100644
index 0000000000..7f550036b8
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeSecurityGroup": {
+ "name": "%(group_name)s"
+ }
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
new file mode 100644
index 0000000000..e51714e3ee
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "%(description)s",
+ "id": 1,
+ "name": "%(group_name)s",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
new file mode 100644
index 0000000000..0372512744
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
new file mode 100644
index 0000000000..1771f2dff1
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
new file mode 100644
index 0000000000..1771f2dff1
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/v3/test_cloudpipe.py b/nova/tests/integrated/v3/test_cloudpipe.py
index 435f875795..a3042eb736 100644
--- a/nova/tests/integrated/v3/test_cloudpipe.py
+++ b/nova/tests/integrated/v3/test_cloudpipe.py
@@ -69,3 +69,12 @@ class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV3):
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-get-resp', subs, response, 200)
+
+ def test_cloud_pipe_update(self):
+ subs = {'vpn_ip': '192.168.1.1',
+ 'vpn_port': 2000}
+ response = self._do_put('os-cloudpipe/configure-project',
+ 'cloud-pipe-update-req',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_security_groups.py b/nova/tests/integrated/v3/test_security_groups.py
index c1f27d5a46..2859bf1f12 100644
--- a/nova/tests/integrated/v3/test_security_groups.py
+++ b/nova/tests/integrated/v3/test_security_groups.py
@@ -19,21 +19,44 @@ from nova.tests.integrated.v3 import test_servers
def fake_get(*args, **kwargs):
nova_group = {}
- nova_group['id'] = 'fake'
- nova_group['description'] = ''
- nova_group['name'] = 'test'
- nova_group['project_id'] = 'fake'
+ nova_group['id'] = 1
+ nova_group['description'] = 'default'
+ nova_group['name'] = 'default'
+ nova_group['project_id'] = 'openstack'
nova_group['rules'] = []
return nova_group
-def fake_get_instances_security_groups_bindings(self, context, servers):
+def fake_get_instances_security_groups_bindings(self, context, servers,
+ detailed=False):
result = {}
for s in servers:
result[s.get('id')] = [{'name': 'test'}]
return result
+def fake_add_to_instance(self, context, instance, security_group_name):
+ pass
+
+
+def fake_remove_from_instance(self, context, instance, security_group_name):
+ pass
+
+
+def fake_list(self, context, names=None, ids=None, project=None,
+ search_opts=None):
+ return [fake_get()]
+
+
+def fake_get_instance_security_groups(self, context, instance_uuid,
+ detailed=False):
+ return [fake_get()]
+
+
+def fake_create_security_group(self, context, name, description):
+ return fake_get()
+
+
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-security-groups'
@@ -44,6 +67,21 @@ class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
fake_get_instances_security_groups_bindings)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'add_to_instance',
+ fake_add_to_instance)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'remove_from_instance',
+ fake_remove_from_instance)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'list',
+ fake_list)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instance_security_groups',
+ fake_get_instance_security_groups)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'create_security_group',
+ fake_create_security_group)
def test_server_create(self):
self._post_server()
@@ -61,3 +99,68 @@ class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
+
+ def _get_create_subs(self):
+ return {
+ 'group_name': 'default',
+ "description": "default",
+ }
+
+ def _create_security_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-security-groups',
+ 'security-group-post-req', subs)
+
+ def _add_group(self, uuid):
+ subs = {
+ 'group_name': 'test'
+ }
+ return self._do_post('servers/%s/action' % uuid,
+ 'security-group-add-post-req', subs)
+
+ def test_security_group_create(self):
+ response = self._create_security_group()
+ subs = self._get_create_subs()
+ self._verify_response('security-groups-create-resp', subs,
+ response, 200)
+
+ def test_security_groups_list(self):
+ # Get API sample of the security group list request.
+ response = self._do_get('os-security-groups')
+ subs = self._get_regexes()
+ self._verify_response('security-groups-list-get-resp',
+ subs, response, 200)
+
+ def test_security_groups_get(self):
+ # Get API sample of a single security group get request.
+ security_group_id = '11111111-1111-1111-1111-111111111111'
+ response = self._do_get('os-security-groups/%s' % security_group_id)
+ subs = self._get_regexes()
+ self._verify_response('security-groups-get-resp', subs, response, 200)
+
+ def test_security_groups_list_server(self):
+ # Get API sample of the security groups for a specific server.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/os-security-groups' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-security-groups-list-resp',
+ subs, response, 200)
+
+ def test_security_groups_add(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ response = self._add_group(uuid)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_security_groups_remove(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ self._add_group(uuid)
+ subs = {
+ 'group_name': 'test'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'security-group-remove-post-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
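Note: the fakes above all share one pattern: a module-level function with the same signature as the real neutron_driver.SecurityGroupAPI method, swapped in with self.stubs.Set() during setUp so the sample tests never touch Neutron. A stand-alone sketch of that pattern, with illustrative names that are not part of this change:

import unittest


class SecurityGroupAPI(object):                     # stand-in for the real driver
    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        raise RuntimeError('would call out to Neutron')


def fake_list(self, context, names=None, ids=None, project=None,
              search_opts=None):
    # same signature as the real method, so callers are unaffected
    return [{'id': 1, 'name': 'default', 'rules': []}]


class StubExample(unittest.TestCase):
    def setUp(self):
        super(StubExample, self).setUp()
        # roughly what self.stubs.Set(SecurityGroupAPI, 'list', fake_list) does
        original = SecurityGroupAPI.list
        SecurityGroupAPI.list = fake_list
        self.addCleanup(setattr, SecurityGroupAPI, 'list', original)

    def test_list_is_stubbed(self):
        self.assertEqual('default', SecurityGroupAPI().list(None)[0]['name'])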
diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py
index 1809cc62f5..05b282365a 100644
--- a/nova/tests/network/test_neutronv2.py
+++ b/nova/tests/network/test_neutronv2.py
@@ -36,8 +36,8 @@ from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova import objects
from nova.openstack.common import policy as common_policy
-from nova.pci import pci_manager
-from nova.pci import pci_whitelist
+from nova.pci import manager as pci_manager
+from nova.pci import whitelist as pci_whitelist
from nova import policy
from nova import test
from nova.tests import fake_instance
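Note: the pci module renames ripple through the tests purely as import aliasing, so existing call sites such as pci_manager.PciDevTracker(...) stay untouched. A tiny generic sketch of the idea, using standard-library modules purely for illustration:

# Bind the new module name to the old local name; every existing call site
# keeps compiling and running unchanged.  (Generic illustration only.)
import json as simplejson        # pretend "simplejson" was renamed to "json"

print(simplejson.dumps({'renamed': True}))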
diff --git a/nova/tests/pci/pci_fakes.py b/nova/tests/pci/fakes.py
index 591834ec9f..b56dfc20a8 100644
--- a/nova/tests/pci/pci_fakes.py
+++ b/nova/tests/pci/fakes.py
@@ -17,13 +17,13 @@ import functools
import mock
-from nova.pci import pci_whitelist
+from nova.pci import whitelist
def fake_pci_whitelist():
devspec = mock.Mock()
devspec.get_tags.return_value = None
- patcher = mock.patch.object(pci_whitelist, 'get_pci_device_devspec',
+ patcher = mock.patch.object(whitelist, 'get_pci_device_devspec',
return_value=devspec)
patcher.start()
return patcher
diff --git a/nova/tests/pci/test_pci_device.py b/nova/tests/pci/test_device.py
index d8291cb37a..2406ac254b 100644
--- a/nova/tests/pci/test_pci_device.py
+++ b/nova/tests/pci/test_device.py
@@ -15,9 +15,8 @@
from nova import context
from nova import exception
-from nova.objects import instance
-from nova.objects import pci_device as pci_device_obj
-from nova.pci import pci_device
+from nova import objects
+from nova.pci import device
from nova import test
@@ -45,16 +44,16 @@ class PciDeviceTestCase(test.TestCase):
def setUp(self):
super(PciDeviceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
- self.inst = instance.Instance()
+ self.inst = objects.Instance()
self.inst.uuid = 'fake-inst-uuid'
- self.inst.pci_devices = pci_device_obj.PciDeviceList()
- self.devobj = pci_device_obj.PciDevice._from_db_object(
+ self.inst.pci_devices = objects.PciDeviceList()
+ self.devobj = objects.PciDevice._from_db_object(
self.ctxt,
- pci_device_obj.PciDevice(),
+ objects.PciDevice(),
dev_dict)
def test_claim_device(self):
- pci_device.claim(self.devobj, self.inst)
+ device.claim(self.devobj, self.inst)
self.assertEqual(self.devobj.status, 'claimed')
self.assertEqual(self.devobj.instance_uuid,
self.inst.uuid)
@@ -63,11 +62,11 @@ class PciDeviceTestCase(test.TestCase):
def test_claim_device_fail(self):
self.devobj.status = 'allocated'
self.assertRaises(exception.PciDeviceInvalidStatus,
- pci_device.claim, self.devobj, self.inst)
+ device.claim, self.devobj, self.inst)
def test_allocate_device(self):
- pci_device.claim(self.devobj, self.inst)
- pci_device.allocate(self.devobj, self.inst)
+ device.claim(self.devobj, self.inst)
+ device.allocate(self.devobj, self.inst)
self.assertEqual(self.devobj.status, 'allocated')
self.assertEqual(self.devobj.instance_uuid, 'fake-inst-uuid')
self.assertEqual(len(self.inst.pci_devices), 1)
@@ -77,29 +76,29 @@ class PciDeviceTestCase(test.TestCase):
def test_allocacte_device_fail_status(self):
self.devobj.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
- pci_device.allocate,
+ device.allocate,
self.devobj,
self.inst)
def test_allocacte_device_fail_owner(self):
- inst_2 = instance.Instance()
+ inst_2 = objects.Instance()
inst_2.uuid = 'fake-inst-uuid-2'
- pci_device.claim(self.devobj, self.inst)
+ device.claim(self.devobj, self.inst)
self.assertRaises(exception.PciDeviceInvalidOwner,
- pci_device.allocate,
+ device.allocate,
self.devobj, inst_2)
def test_free_claimed_device(self):
- pci_device.claim(self.devobj, self.inst)
- pci_device.free(self.devobj, self.inst)
+ device.claim(self.devobj, self.inst)
+ device.free(self.devobj, self.inst)
self.assertEqual(self.devobj.status, 'available')
self.assertIsNone(self.devobj.instance_uuid)
def test_free_allocated_device(self):
- pci_device.claim(self.devobj, self.inst)
- pci_device.allocate(self.devobj, self.inst)
+ device.claim(self.devobj, self.inst)
+ device.allocate(self.devobj, self.inst)
self.assertEqual(len(self.inst.pci_devices), 1)
- pci_device.free(self.devobj, self.inst)
+ device.free(self.devobj, self.inst)
self.assertEqual(len(self.inst.pci_devices), 0)
self.assertEqual(self.devobj.status, 'available')
self.assertIsNone(self.devobj.instance_uuid)
@@ -107,14 +106,14 @@ class PciDeviceTestCase(test.TestCase):
def test_free_device_fail(self):
self.devobj.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
- pci_device.free, self.devobj)
+ device.free, self.devobj)
def test_remove_device(self):
- pci_device.remove(self.devobj)
+ device.remove(self.devobj)
self.assertEqual(self.devobj.status, 'removed')
self.assertIsNone(self.devobj.instance_uuid)
def test_remove_device_fail(self):
- pci_device.claim(self.devobj, self.inst)
+ device.claim(self.devobj, self.inst)
self.assertRaises(exception.PciDeviceInvalidStatus,
- pci_device.remove, self.devobj)
+ device.remove, self.devobj)
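Note: the claim/allocate/free/remove cases above effectively pin a small status state machine and expect PciDeviceInvalidStatus on any other transition. A self-contained sketch of the transitions these tests assert (names illustrative, not the nova.pci.device API):

class PciDeviceInvalidStatus(Exception):
    pass

# transitions asserted by the tests above; anything else raises
_ALLOWED = {'claim': {'available'},
            'allocate': {'claimed'},
            'free': {'claimed', 'allocated'},
            'remove': {'available'}}
_RESULT = {'claim': 'claimed', 'allocate': 'allocated',
           'free': 'available', 'remove': 'removed'}

def transition(status, action):
    if status not in _ALLOWED[action]:
        raise PciDeviceInvalidStatus('%s not allowed from %s' % (action, status))
    return _RESULT[action]

assert transition('available', 'claim') == 'claimed'
assert transition('claimed', 'allocate') == 'allocated'
assert transition('allocated', 'free') == 'available'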
diff --git a/nova/tests/pci/test_pci_devspec.py b/nova/tests/pci/test_devspec.py
index 8ffd1f5a19..d7b6098871 100644
--- a/nova/tests/pci/test_pci_devspec.py
+++ b/nova/tests/pci/test_devspec.py
@@ -13,10 +13,11 @@
import mock
+import six
from nova import exception
-from nova.objects import pci_device
-from nova.pci import pci_devspec
+from nova import objects
+from nova.pci import devspec
from nova import test
dev = {"vendor_id": "8086",
@@ -29,76 +30,76 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_wrong_address(self):
pci_info = ('{"vendor_id": "8086", "address": "*: *: *.6",' +
'"product_id": "5057", "physical_network": "hr_net"}')
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_address_too_big(self):
pci_info = ('{"address": "0000:0a:0b:00.5", ' +
'"physical_network": "hr_net"}')
self.assertRaises(exception.PciDeviceWrongAddressFormat,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
def test_address_invalid_character(self):
pci_info = '{"address": "0000:h4.12:6", "physical_network": "hr_net"}'
self.assertRaises(exception.PciDeviceWrongAddressFormat,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
def test_max_func(self):
pci_info = (('{"address": "0000:0a:00.%s", ' +
'"physical_network": "hr_net"}') %
- (pci_devspec.MAX_FUNC + 1))
+ (devspec.MAX_FUNC + 1))
exc = self.assertRaises(exception.PciDeviceInvalidAddressField,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI Whitelist: '
'The PCI address 0000:0a:00.%s has an invalid function.'
- % (pci_devspec.MAX_FUNC + 1))
- self.assertEqual(msg, unicode(exc))
+ % (devspec.MAX_FUNC + 1))
+ self.assertEqual(msg, six.text_type(exc))
def test_max_domain(self):
pci_info = ('{"address": "%x:0a:00.5", "physical_network":"hr_net"}'
- % (pci_devspec.MAX_DOMAIN + 1))
+ % (devspec.MAX_DOMAIN + 1))
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid domain %x'
- % (pci_devspec.MAX_DOMAIN + 1))
- self.assertEqual(msg, unicode(exc))
+ % (devspec.MAX_DOMAIN + 1))
+ self.assertEqual(msg, six.text_type(exc))
def test_max_bus(self):
pci_info = ('{"address": "0000:%x:00.5", "physical_network":"hr_net"}'
- % (pci_devspec.MAX_BUS + 1))
+ % (devspec.MAX_BUS + 1))
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid bus %x'
- % (pci_devspec.MAX_BUS + 1))
- self.assertEqual(msg, unicode(exc))
+ % (devspec.MAX_BUS + 1))
+ self.assertEqual(msg, six.text_type(exc))
def test_max_slot(self):
pci_info = ('{"address": "0000:0a:%x.5", "physical_network":"hr_net"}'
- % (pci_devspec.MAX_SLOT + 1))
+ % (devspec.MAX_SLOT + 1))
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid slot %x'
- % (pci_devspec.MAX_SLOT + 1))
- self.assertEqual(msg, unicode(exc))
+ % (devspec.MAX_SLOT + 1))
+ self.assertEqual(msg, six.text_type(exc))
def test_address_is_undefined(self):
pci_info = '{"vendor_id":"8086", "product_id":"5057"}'
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_partial_address(self):
pci_info = '{"address":":0a:00.", "physical_network":"hr_net"}'
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
dev = {"vendor_id": "1137",
"product_id": "0071",
"address": "0000:0a:00.5",
"phys_function": "0000:0a:00.0"}
self.assertTrue(pci.match(dev))
- @mock.patch('nova.pci.pci_utils.is_physical_function', return_value = True)
+ @mock.patch('nova.pci.utils.is_physical_function', return_value = True)
def test_address_is_pf(self, mock_is_physical_function):
pci_info = '{"address":"0000:0a:00.0", "physical_network":"hr_net"}'
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
@@ -106,63 +107,63 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_spec_match(self):
pci_info = ('{"vendor_id": "8086","address": "*: *: *.5",' +
'"product_id": "5057", "physical_network": "hr_net"}')
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_invalid_vendor_id(self):
pci_info = ('{"vendor_id": "8087","address": "*: *: *.5", ' +
'"product_id": "5057", "physical_network": "hr_net"}')
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_vendor_id_out_of_range(self):
pci_info = ('{"vendor_id": "80860", "address": "*:*:*.5", ' +
'"product_id": "5057", "physical_network": "hr_net"}')
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
self.assertEqual("Invalid PCI devices Whitelist config "
- "invalid vendor_id 80860", unicode(exc))
+ "invalid vendor_id 80860", six.text_type(exc))
def test_invalid_product_id(self):
pci_info = ('{"vendor_id": "8086","address": "*: *: *.5", ' +
'"product_id": "5056", "physical_network": "hr_net"}')
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_product_id_out_of_range(self):
pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' +
'"product_id": "50570", "physical_network": "hr_net"}')
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
self.assertEqual("Invalid PCI devices Whitelist config "
- "invalid product_id 50570", unicode(exc))
+ "invalid product_id 50570", six.text_type(exc))
def test_devname_and_address(self):
pci_info = ('{"devname": "eth0", "vendor_id":"8086", ' +
'"address":"*:*:*.5", "physical_network": "hr_net"}')
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- pci_devspec.PciDeviceSpec, pci_info)
+ devspec.PciDeviceSpec, pci_info)
- @mock.patch('nova.pci.pci_utils.get_function_by_ifname',
+ @mock.patch('nova.pci.utils.get_function_by_ifname',
return_value = ("0000:0a:00.0", True))
def test_by_name(self, mock_get_function_by_ifname):
pci_info = '{"devname": "eth0", "physical_network": "hr_net"}'
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
- @mock.patch('nova.pci.pci_utils.get_function_by_ifname',
+ @mock.patch('nova.pci.utils.get_function_by_ifname',
return_value = (None, False))
def test_invalid_name(self, mock_get_function_by_ifname):
pci_info = '{"devname": "lo", "physical_network": "hr_net"}'
exc = self.assertRaises(exception.PciDeviceNotFoundById,
- pci_devspec.PciDeviceSpec, pci_info)
- self.assertEqual('PCI device lo not found', unicode(exc))
+ devspec.PciDeviceSpec, pci_info)
+ self.assertEqual('PCI device lo not found', six.text_type(exc))
def test_pci_obj(self):
pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' +
'"product_id": "5057", "physical_network": "hr_net"}')
- pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci = devspec.PciDeviceSpec(pci_info)
pci_dev = {
'compute_node_id': 1,
'address': '0000:00:00.5',
@@ -172,5 +173,5 @@ class PciDevSpecTestCase(test.NoDBTestCase):
'extra_k1': 'v1',
}
- pci_obj = pci_device.PciDevice.create(pci_dev)
+ pci_obj = objects.PciDevice.create(pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
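Note: the unicode(exc) -> six.text_type(exc) changes are the usual Python 3 preparation: the unicode builtin no longer exists on Python 3, while six.text_type is unicode on Python 2 and str on Python 3, so the message assertions hold on both. A minimal sketch:

import six

exc = ValueError('PCI device lo not found')
# six.text_type is unicode on Python 2 and str on Python 3
assert six.text_type(exc) == 'PCI device lo not found'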
diff --git a/nova/tests/pci/test_pci_manager.py b/nova/tests/pci/test_manager.py
index 3c8cd6030b..e37fd5b067 100644
--- a/nova/tests/pci/test_pci_manager.py
+++ b/nova/tests/pci/test_manager.py
@@ -23,11 +23,11 @@ from nova import context
from nova import db
from nova import exception
from nova import objects
-from nova.pci import pci_device
-from nova.pci import pci_manager
+from nova.pci import device
+from nova.pci import manager
from nova import test
from nova.tests.api.openstack import fakes
-from nova.tests.pci import pci_fakes
+from nova.tests.pci import fakes as pci_fakes
fake_pci = {
@@ -111,7 +111,7 @@ class PciDevTrackerTestCase(test.TestCase):
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_instance()
- self.tracker = pci_manager.PciDevTracker(1)
+ self.tracker = manager.PciDevTracker(1)
def test_pcidev_tracker_create(self):
self.assertEqual(len(self.tracker.pci_devs), 3)
@@ -122,7 +122,7 @@ class PciDevTrackerTestCase(test.TestCase):
self.assertEqual(self.tracker.node_id, 1)
def test_pcidev_tracker_create_no_nodeid(self):
- self.tracker = pci_manager.PciDevTracker()
+ self.tracker = manager.PciDevTracker()
self.assertEqual(len(self.tracker.pci_devs), 0)
def test_set_hvdev_new_dev(self):
@@ -260,13 +260,13 @@ class PciDevTrackerTestCase(test.TestCase):
self.assertEqual(len(self.tracker.pci_devs), 3)
dev = self.tracker.pci_devs[0]
self.update_called = 0
- pci_device.remove(dev)
+ device.remove(dev)
self.tracker.save(ctxt)
self.assertEqual(len(self.tracker.pci_devs), 2)
self.assertEqual(self.destroy_called, 1)
def test_set_compute_node_id(self):
- self.tracker = pci_manager.PciDevTracker()
+ self.tracker = manager.PciDevTracker()
fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
copy.deepcopy(fake_pci_2)]
self.tracker.set_hvdevs(fake_pci_devs)
@@ -360,5 +360,5 @@ class PciGetInstanceDevs(test.TestCase):
self.stubs.Set(objects.Instance, 'obj_load_attr', _fake_obj_load_attr)
self.load_attr_called = False
- pci_manager.get_instance_pci_devs(inst)
+ manager.get_instance_pci_devs(inst)
self.assertEqual(self.load_attr_called, True)
diff --git a/nova/tests/pci/test_pci_request.py b/nova/tests/pci/test_request.py
index 82568f8455..32c768b0c0 100644
--- a/nova/tests/pci/test_pci_request.py
+++ b/nova/tests/pci/test_request.py
@@ -16,7 +16,7 @@
"""Tests for PCI request."""
from nova import exception
-from nova.pci import pci_request as pci_request
+from nova.pci import request
from nova import test
@@ -56,7 +56,7 @@ _fake_alias3 = """{
class AliasTestCase(test.NoDBTestCase):
def test_good_alias(self):
self.flags(pci_alias=[_fake_alias1])
- als = pci_request._get_alias_from_config()
+ als = request._get_alias_from_config()
self.assertIsInstance(als['QuicAssist'], list)
expect_dict = {
"capability_type": "pci",
@@ -68,7 +68,7 @@ class AliasTestCase(test.NoDBTestCase):
def test_multispec_alias(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias11])
- als = pci_request._get_alias_from_config()
+ als = request._get_alias_from_config()
self.assertIsInstance(als['QuicAssist'], list)
expect_dict1 = {
"capability_type": "pci",
@@ -89,7 +89,7 @@ class AliasTestCase(test.NoDBTestCase):
def test_wrong_type_aliase(self):
self.flags(pci_alias=[_fake_alias2])
self.assertRaises(exception.PciInvalidAlias,
- pci_request._get_alias_from_config)
+ request._get_alias_from_config)
def test_wrong_product_id_aliase(self):
self.flags(pci_alias=[
@@ -101,7 +101,7 @@ class AliasTestCase(test.NoDBTestCase):
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
- pci_request._get_alias_from_config)
+ request._get_alias_from_config)
def test_wrong_vendor_id_aliase(self):
self.flags(pci_alias=[
@@ -113,7 +113,7 @@ class AliasTestCase(test.NoDBTestCase):
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
- pci_request._get_alias_from_config)
+ request._get_alias_from_config)
def test_wrong_cap_type_aliase(self):
self.flags(pci_alias=[
@@ -125,7 +125,7 @@ class AliasTestCase(test.NoDBTestCase):
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
- pci_request._get_alias_from_config)
+ request._get_alias_from_config)
def test_dup_aliase(self):
self.flags(pci_alias=[
@@ -145,7 +145,7 @@ class AliasTestCase(test.NoDBTestCase):
}"""])
self.assertRaises(
exception.PciInvalidAlias,
- pci_request._get_alias_from_config)
+ request._get_alias_from_config)
def _verify_result(self, expected, real):
exp_real = zip(expected, real)
@@ -169,7 +169,7 @@ class AliasTestCase(test.NoDBTestCase):
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
- requests = pci_request._translate_alias_to_requests(
+ requests = request._translate_alias_to_requests(
"QuicAssist : 3, IntelNIC: 1")
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
@@ -177,7 +177,7 @@ class AliasTestCase(test.NoDBTestCase):
def test_aliase_2_request_invalid(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
self.assertRaises(exception.PciRequestAliasNotDefined,
- pci_request._translate_alias_to_requests,
+ request._translate_alias_to_requests,
"QuicAssistX : 3")
def test_get_pci_requests_from_flavor(self):
@@ -197,7 +197,7 @@ class AliasTestCase(test.NoDBTestCase):
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
- requests = pci_request.get_pci_requests_from_flavor(flavor)
+ requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual(set([1, 3]),
set([p.count for p in requests.requests]))
self._verify_result(expect_request, requests.requests)
@@ -205,5 +205,5 @@ class AliasTestCase(test.NoDBTestCase):
def test_get_pci_requests_from_flavor_no_extra_spec(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
flavor = {}
- requests = pci_request.get_pci_requests_from_flavor(flavor)
+ requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual([], requests.requests)
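Note: the alias-to-request cases feed a flavor extra-spec of the form "name:count, name:count". A rough stand-alone sketch of that parsing, not the real nova.pci.request code:

def parse_alias_spec(spec):
    """Turn 'QuicAssist : 3, IntelNIC: 1' into [('QuicAssist', 3), ...]."""
    requests = []
    for part in spec.split(','):
        name, _sep, count = part.partition(':')
        requests.append((name.strip(), int(count)))
    return requests

assert parse_alias_spec("QuicAssist : 3, IntelNIC: 1") == [
    ('QuicAssist', 3), ('IntelNIC', 1)]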
diff --git a/nova/tests/pci/test_pci_stats.py b/nova/tests/pci/test_stats.py
index 2f830c6cfa..9a4d1afd1d 100644
--- a/nova/tests/pci/test_pci_stats.py
+++ b/nova/tests/pci/test_stats.py
@@ -18,10 +18,10 @@ from oslo.serialization import jsonutils
from nova import exception
from nova import objects
-from nova.pci import pci_stats as pci
-from nova.pci import pci_whitelist
+from nova.pci import stats
+from nova.pci import whitelist
from nova import test
-from nova.tests.pci import pci_fakes
+from nova.tests.pci import fakes
fake_pci_1 = {
'compute_node_id': 1,
@@ -65,9 +65,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
- self.pci_stats = pci.PciDeviceStats()
+ self.pci_stats = stats.PciDeviceStats()
# The following two calls need to be made before adding the devices.
- patcher = pci_fakes.fake_pci_whitelist()
+ patcher = fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_devs()
@@ -92,7 +92,7 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def test_json_creat(self):
m = jsonutils.dumps(self.pci_stats)
- new_stats = pci.PciDeviceStats(m)
+ new_stats = stats.PciDeviceStats(m)
self.assertEqual(len(new_stats.pools), 2)
self.assertEqual(set([d['count'] for d in new_stats]),
@@ -141,19 +141,19 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests_multiple)
-@mock.patch.object(pci_whitelist, 'get_pci_devices_filter')
+@mock.patch.object(whitelist, 'get_pci_devices_filter')
class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
- self.pci_stats = pci.PciDeviceStats()
+ self.pci_stats = stats.PciDeviceStats()
self._create_whitelist()
def _create_whitelist(self):
white_list = ['{"vendor_id":"1137","product_id":"0071",'
'"address":"*:0a:00.*","physical_network":"physnet1"}',
'{"vendor_id":"1137","product_id":"0072"}']
- self.pci_wlist = pci_whitelist.PciHostDevicesWhiteList(white_list)
+ self.pci_wlist = whitelist.PciHostDevicesWhiteList(white_list)
def _create_pci_devices(self):
self.pci_tagged_devices = []
diff --git a/nova/tests/pci/test_pci_utils.py b/nova/tests/pci/test_utils.py
index d904dc9254..77a0ce24f5 100644
--- a/nova/tests/pci/test_pci_utils.py
+++ b/nova/tests/pci/test_utils.py
@@ -15,7 +15,7 @@
# under the License.
from nova import exception
-from nova.pci import pci_utils
+from nova.pci import utils
from nova import test
@@ -26,36 +26,36 @@ class PciDeviceMatchTestCase(test.NoDBTestCase):
'device_id': 'd1'}
def test_single_spec_match(self):
- self.assertTrue(pci_utils.pci_device_prop_match(
+ self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1'}]))
def test_multiple_spec_match(self):
- self.assertTrue(pci_utils.pci_device_prop_match(
+ self.assertTrue(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_dismatch(self):
- self.assertFalse(pci_utils.pci_device_prop_match(
+ self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
def test_spec_extra_key(self):
- self.assertFalse(pci_utils.pci_device_prop_match(
+ self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
def test_parse_address(self):
- self.parse_result = pci_utils.parse_address("0000:04:12.6")
+ self.parse_result = utils.parse_address("0000:04:12.6")
self.assertEqual(self.parse_result, ('0000', '04', '12', '6'))
def test_parse_address_wrong(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
- pci_utils.parse_address, "0000:04.12:6")
+ utils.parse_address, "0000:04.12:6")
def test_parse_address_invalid_character(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
- pci_utils.parse_address, "0000:h4.12:6")
+ utils.parse_address, "0000:h4.12:6")
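Note: parse_address accepts only the domain:bus:slot.function form and raises on anything else, which is what the three cases above check. A small regex sketch of the same behaviour (error type illustrative):

import re

_ADDR = re.compile(r'^([\da-fA-F]{4}):([\da-fA-F]{2}):([\da-fA-F]{2})\.(\d)$')

def parse_address(address):
    match = _ADDR.match(address)
    if not match:
        raise ValueError('wrong PCI address format: %s' % address)
    return match.groups()

assert parse_address("0000:04:12.6") == ('0000', '04', '12', '6')
# "0000:04.12:6" and "0000:h4.12:6" do not match and therefore raise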
diff --git a/nova/tests/pci/test_pci_whitelist.py b/nova/tests/pci/test_whitelist.py
index 01b753d745..cb5891dffb 100644
--- a/nova/tests/pci/test_pci_whitelist.py
+++ b/nova/tests/pci/test_whitelist.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.pci import pci_whitelist
+from nova.pci import whitelist
from nova import test
@@ -30,28 +30,28 @@ dev_dict = {
class PciHostDevicesWhiteListTestCase(test.NoDBTestCase):
def test_whitelist(self):
white_list = '{"product_id":"0001", "vendor_id":"8086"}'
- parsed = pci_whitelist.PciHostDevicesWhiteList([white_list])
+ parsed = whitelist.PciHostDevicesWhiteList([white_list])
self.assertEqual(1, len(parsed.specs))
def test_whitelist_empty(self):
- parsed = pci_whitelist.PciHostDevicesWhiteList()
+ parsed = whitelist.PciHostDevicesWhiteList()
self.assertFalse(parsed.device_assignable(dev_dict))
def test_whitelist_multiple(self):
wl1 = '{"product_id":"0001", "vendor_id":"8086"}'
wl2 = '{"product_id":"0002", "vendor_id":"8087"}'
- parsed = pci_whitelist.PciHostDevicesWhiteList([wl1, wl2])
+ parsed = whitelist.PciHostDevicesWhiteList([wl1, wl2])
self.assertEqual(2, len(parsed.specs))
def test_device_assignable(self):
white_list = '{"product_id":"0001", "vendor_id":"8086"}'
- parsed = pci_whitelist.PciHostDevicesWhiteList([white_list])
+ parsed = whitelist.PciHostDevicesWhiteList([white_list])
self.assertIsNotNone(parsed.device_assignable(dev_dict))
def test_device_assignable_multiple(self):
white_list_1 = '{"product_id":"0001", "vendor_id":"8086"}'
white_list_2 = '{"product_id":"0002", "vendor_id":"8087"}'
- parsed = pci_whitelist.PciHostDevicesWhiteList(
+ parsed = whitelist.PciHostDevicesWhiteList(
[white_list_1, white_list_2])
self.assertIsNotNone(parsed.device_assignable(dev_dict))
dev_dict1 = dev_dict.copy()
@@ -62,5 +62,5 @@ class PciHostDevicesWhiteListTestCase(test.NoDBTestCase):
def test_get_pci_devices_filter(self):
white_list_1 = '{"product_id":"0001", "vendor_id":"8086"}'
self.flags(pci_passthrough_whitelist=[white_list_1])
- pci_filter = pci_whitelist.get_pci_devices_filter()
+ pci_filter = whitelist.get_pci_devices_filter()
self.assertIsNotNone(pci_filter.device_assignable(dev_dict))
diff --git a/nova/tests/scheduler/filters/__init__.py b/nova/tests/scheduler/filters/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/scheduler/filters/__init__.py
diff --git a/nova/tests/scheduler/filters/test_affinity_filters.py b/nova/tests/scheduler/filters/test_affinity_filters.py
new file mode 100644
index 0000000000..de75a85c07
--- /dev/null
+++ b/nova/tests/scheduler/filters/test_affinity_filters.py
@@ -0,0 +1,185 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.scheduler.filters import affinity_filter
+from nova import test
+from nova.tests.scheduler import fakes
+
+CONF = cfg.CONF
+
+CONF.import_opt('my_ip', 'nova.netconf')
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestDifferentHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDifferentHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.DifferentHostFilter()
+
+ def test_affinity_different_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.instances]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestSameHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSameHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.SameHostFilter()
+
+ def test_affinity_same_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSimpleCIDRAffinityFilter, self).setUp()
+ self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
+
+ def test_affinity_simple_cidr_filter_passes(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/24',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_fails(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/32',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_handles_none(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ affinity_ip = CONF.my_ip.split('.')[0:3]
+ affinity_ip.append('100')
+ affinity_ip = str.join('.', affinity_ip)
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
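Note: SimpleCIDRAffinityFilter passes a host when its IP falls inside the network formed from the build_near_host_ip hint plus the cidr suffix, which is why /24 passes and /32 fails for host 10.8.1.1 near 10.8.1.100. A stand-alone containment check using the standard library, not necessarily the filter's own implementation:

import ipaddress

def near_host(host_ip, affinity_ip, cidr):
    network = ipaddress.ip_network(u'%s%s' % (affinity_ip, cidr), strict=False)
    return ipaddress.ip_address(u'%s' % host_ip) in network

assert near_host('10.8.1.1', '10.8.1.100', '/24')        # same /24: passes
assert not near_host('10.8.1.1', '10.8.1.100', '/32')    # exact host only: fails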
diff --git a/nova/tests/scheduler/filters/test_disk_filters.py b/nova/tests/scheduler/filters/test_disk_filters.py
new file mode 100644
index 0000000000..668b6f09df
--- /dev/null
+++ b/nova/tests/scheduler/filters/test_disk_filters.py
@@ -0,0 +1,57 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import disk_filter
+from nova import test
+from nova.tests.scheduler import fakes
+
+
+class TestDiskFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDiskFilter, self).setUp()
+ self.filt_cls = disk_filter.DiskFilter()
+
+ def test_disk_filter_passes(self):
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {'instance_type': {'root_gb': 1,
+ 'ephemeral_gb': 1, 'swap': 512}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_disk_filter_fails(self):
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {'instance_type': {'root_gb': 10,
+ 'ephemeral_gb': 1, 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_disk_filter_oversubscribe(self):
+ self.flags(disk_allocation_ratio=10.0)
+ filter_properties = {'instance_type': {'root_gb': 100,
+ 'ephemeral_gb': 18, 'swap': 1024}}
+ # 1GB used... so 119GB allowed...
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(12 * 10.0, host.limits['disk_gb'])
+
+ def test_disk_filter_oversubscribe_fail(self):
+ self.flags(disk_allocation_ratio=10.0)
+ filter_properties = {'instance_type': {'root_gb': 100,
+ 'ephemeral_gb': 19, 'swap': 1024}}
+ # 1GB used... so 119GB allowed...
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
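Note: the "1GB used... so 119GB allowed" comments follow from the DiskFilter arithmetic: usable = total * disk_allocation_ratio - (total - free). A worked version of the two oversubscription cases, as a plain sketch:

total_gb = 12                      # total_usable_disk_gb
free_gb = 11 * 1024 / 1024.0       # free_disk_mb, converted to GB
ratio = 10.0                       # disk_allocation_ratio

used_gb = total_gb - free_gb                 # 1 GB already consumed
usable_gb = total_gb * ratio - used_gb       # 120 - 1 = 119 GB
assert usable_gb == 119.0

requested_pass = 100 + 18 + 1      # root_gb + ephemeral_gb + 1024 MB swap
requested_fail = 100 + 19 + 1
assert requested_pass <= usable_gb
assert not requested_fail <= usable_gb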
diff --git a/nova/tests/scheduler/filters/test_extra_specs_ops.py b/nova/tests/scheduler/filters/test_extra_specs_ops.py
new file mode 100644
index 0000000000..5f8f912a81
--- /dev/null
+++ b/nova/tests/scheduler/filters/test_extra_specs_ops.py
@@ -0,0 +1,200 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import extra_specs_ops
+from nova import test
+
+
+class ExtraSpecsOpsTestCase(test.NoDBTestCase):
+ def _do_extra_specs_ops_test(self, value, req, matches):
+ assertion = self.assertTrue if matches else self.assertFalse
+ assertion(extra_specs_ops.match(value, req))
+
+ def test_extra_specs_matches_simple(self):
+ self._do_extra_specs_ops_test(
+ value='1',
+ req='1',
+ matches=True)
+
+ def test_extra_specs_fails_simple(self):
+ self._do_extra_specs_ops_test(
+ value='',
+ req='1',
+ matches=False)
+
+ def test_extra_specs_fails_simple2(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='1',
+ matches=False)
+
+ def test_extra_specs_fails_simple3(self):
+ self._do_extra_specs_ops_test(
+ value='222',
+ req='2',
+ matches=False)
+
+ def test_extra_specs_fails_with_bogus_ops(self):
+ self._do_extra_specs_ops_test(
+ value='4',
+ req='> 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_eq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='= 123',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_eq2(self):
+ self._do_extra_specs_ops_test(
+ value='124',
+ req='= 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_eq(self):
+ self._do_extra_specs_ops_test(
+ value='34',
+ req='= 234',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_eq3(self):
+ self._do_extra_specs_ops_test(
+ value='34',
+ req='=',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_seq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='s== 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_seq(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s== 123',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_sneq(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s!= 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_sneq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='s!= 123',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sge(self):
+ self._do_extra_specs_ops_test(
+ value='1000',
+ req='s>= 234',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sle(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s<= 1000',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sl(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='s< 12',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sg(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='s> 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_in(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 11',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_in2(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 12311321',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_in3(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 12311321 <in>',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_in(self):
+ self._do_extra_specs_ops_test(
+ value='12310321',
+ req='<in> 11',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_in2(self):
+ self._do_extra_specs_ops_test(
+ value='12310321',
+ req='<in> 11 <in>',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_or(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='<or> 11 <or> 12',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_or2(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='<or> 11 <or> 12 <or>',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_or(self):
+ self._do_extra_specs_ops_test(
+ value='13',
+ req='<or> 11 <or> 12',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_or2(self):
+ self._do_extra_specs_ops_test(
+ value='13',
+ req='<or> 11 <or> 12 <or>',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_le(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='<= 10',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_le(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='<= 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_ge(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='>= 1',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_ge(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='>= 3',
+ matches=False)
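Note: these cases exercise the extra_specs matching mini-language: a bare value is literal equality, '=', '<=' and '>=' compare numerically (with '=' meaning "at least"), the 's' operators compare as strings, '<in>' is a substring test and '<or>' is alternation. A rough re-implementation covering just the operators used above, as a sketch rather than the nova.scheduler.filters.extra_specs_ops source:

def match(value, req):
    words = req.split()
    if not words:
        return value == req
    op, args = words[0], words[1:]

    num_ops = {'=':  lambda v, a: float(v) >= float(a),   # '=' means "at least"
               '<=': lambda v, a: float(v) <= float(a),
               '>=': lambda v, a: float(v) >= float(a)}
    str_ops = {'s==': lambda v, a: v == a, 's!=': lambda v, a: v != a,
               's<': lambda v, a: v < a, 's>': lambda v, a: v > a,
               's<=': lambda v, a: v <= a, 's>=': lambda v, a: v >= a}

    if op == '<or>':
        return value in args[::2]          # <or> v1 <or> v2 ...
    if op == '<in>':
        return bool(args) and args[0] in value
    if op in num_ops or op in str_ops:
        if not args:
            return False                   # e.g. req '=' with no operand
        try:
            return (num_ops.get(op) or str_ops[op])(value, args[0])
        except ValueError:
            return False
    return value == req                    # unknown operator: literal compare

assert match('124', '= 123')               # numeric "at least"
assert not match('4', '> 2')               # '>' is not an operator here
assert match('12311321', '<in> 11')
assert match('12', '<or> 11 <or> 12 <or>')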
diff --git a/nova/tests/scheduler/filters/test_ram_filters.py b/nova/tests/scheduler/filters/test_ram_filters.py
new file mode 100644
index 0000000000..880cdac296
--- /dev/null
+++ b/nova/tests/scheduler/filters/test_ram_filters.py
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import ram_filter
+from nova import test
+from nova.tests.scheduler import fakes
+
+
+class TestRamFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestRamFilter, self).setUp()
+ self.filt_cls = ram_filter.RamFilter()
+
+ def test_ram_filter_fails_on_memory(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 1.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_ram_filter_passes(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 1.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_ram_filter_oversubscribe(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 2.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
+
+
+@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+class TestAggregateRamFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateRamFilter, self).setUp()
+ self.filt_cls = ram_filter.AggregateRamFilter()
+
+ def test_aggregate_ram_filter_value_error(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+ agg_mock.return_value = set(['XXX'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_default_value(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ # False: fallback to default flag w/o aggregates
+ agg_mock.return_value = set()
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.return_value = set(['2.0'])
+ # True: use ratio from aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_conflict_values(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ agg_mock.return_value = set(['1.5', '2.0'])
+ # use the minimum ratio from aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
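Note: the aggregate cases expect the smallest ratio found in the host's aggregate metadata to win, with non-numeric values and empty metadata falling back to the ram_allocation_ratio flag. A small sketch of that selection logic (not the filter's actual code):

def effective_ram_ratio(aggregate_values, default_ratio):
    """Pick the smallest usable ratio from aggregate metadata."""
    ratios = []
    for value in aggregate_values:
        try:
            ratios.append(float(value))
        except ValueError:
            pass                      # 'XXX' and friends fall back to the flag
    return min(ratios) if ratios else default_ratio

assert effective_ram_ratio(set(), 1.0) == 1.0            # no aggregates: flag
assert effective_ram_ratio({'XXX'}, 1.0) == 1.0          # bad value: flag
assert effective_ram_ratio({'1.5', '2.0'}, 1.0) == 1.5   # conflict: minimum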
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 6cdf7613a8..596476dae0 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -444,11 +444,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
for policy in policies:
self._group_filter_with_filter_not_configured(policy)
- def test_group_uuid_details_in_filter_properties(self):
- group = self._create_server_group()
- self._group_details_in_filter_properties(group, 'get_by_uuid',
- group.uuid, 'anti-affinity')
-
def test_group_name_details_in_filter_properties(self):
group = self._create_server_group()
self._group_details_in_filter_properties(group, 'get_by_name',
@@ -646,6 +641,20 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.select_destinations, self.context,
{'num_instances': 1}, {})
+ def test_select_destinations_no_valid_host_not_enough(self):
+ # Tests the case where fewer hosts are available than the number of
+ # instances requested to build.
+ with mock.patch.object(self.driver, '_schedule',
+ return_value=[mock.sentinel.host1]):
+ try:
+ self.driver.select_destinations(
+ self.context, {'num_instances': 2}, {})
+ self.fail('Expected NoValidHost to be raised.')
+ except exception.NoValidHost as e:
+ # Make sure a reason was provided for the NoValidHost error.
+ self.assertIn('reason', e.kwargs)
+ self.assertTrue(len(e.kwargs['reason']) > 0)
+
def test_handles_deleted_instance(self):
"""Test instance deletion while being scheduled."""
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 5bd4d2c1f4..93d0bf7929 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -30,10 +30,8 @@ from nova import context
from nova import db
from nova import objects
from nova.objects import base as obj_base
-from nova.pci import pci_stats
+from nova.pci import stats as pci_stats
from nova.scheduler import filters
-from nova.scheduler.filters import extra_specs_ops
-from nova.scheduler.filters import ram_filter
from nova.scheduler.filters import trusted_filter
from nova import servicegroup
from nova import test
@@ -44,8 +42,6 @@ from nova.virt import hardware
CONF = cfg.CONF
-CONF.import_opt('my_ip', 'nova.netconf')
-
class TestFilter(filters.BaseHostFilter):
pass
@@ -56,192 +52,6 @@ class TestBogusFilter(object):
pass
-class ExtraSpecsOpsTestCase(test.NoDBTestCase):
- def _do_extra_specs_ops_test(self, value, req, matches):
- assertion = self.assertTrue if matches else self.assertFalse
- assertion(extra_specs_ops.match(value, req))
-
- def test_extra_specs_matches_simple(self):
- self._do_extra_specs_ops_test(
- value='1',
- req='1',
- matches=True)
-
- def test_extra_specs_fails_simple(self):
- self._do_extra_specs_ops_test(
- value='',
- req='1',
- matches=False)
-
- def test_extra_specs_fails_simple2(self):
- self._do_extra_specs_ops_test(
- value='3',
- req='1',
- matches=False)
-
- def test_extra_specs_fails_simple3(self):
- self._do_extra_specs_ops_test(
- value='222',
- req='2',
- matches=False)
-
- def test_extra_specs_fails_with_bogus_ops(self):
- self._do_extra_specs_ops_test(
- value='4',
- req='> 2',
- matches=False)
-
- def test_extra_specs_matches_with_op_eq(self):
- self._do_extra_specs_ops_test(
- value='123',
- req='= 123',
- matches=True)
-
- def test_extra_specs_matches_with_op_eq2(self):
- self._do_extra_specs_ops_test(
- value='124',
- req='= 123',
- matches=True)
-
- def test_extra_specs_fails_with_op_eq(self):
- self._do_extra_specs_ops_test(
- value='34',
- req='= 234',
- matches=False)
-
- def test_extra_specs_fails_with_op_eq3(self):
- self._do_extra_specs_ops_test(
- value='34',
- req='=',
- matches=False)
-
- def test_extra_specs_matches_with_op_seq(self):
- self._do_extra_specs_ops_test(
- value='123',
- req='s== 123',
- matches=True)
-
- def test_extra_specs_fails_with_op_seq(self):
- self._do_extra_specs_ops_test(
- value='1234',
- req='s== 123',
- matches=False)
-
- def test_extra_specs_matches_with_op_sneq(self):
- self._do_extra_specs_ops_test(
- value='1234',
- req='s!= 123',
- matches=True)
-
- def test_extra_specs_fails_with_op_sneq(self):
- self._do_extra_specs_ops_test(
- value='123',
- req='s!= 123',
- matches=False)
-
- def test_extra_specs_fails_with_op_sge(self):
- self._do_extra_specs_ops_test(
- value='1000',
- req='s>= 234',
- matches=False)
-
- def test_extra_specs_fails_with_op_sle(self):
- self._do_extra_specs_ops_test(
- value='1234',
- req='s<= 1000',
- matches=False)
-
- def test_extra_specs_fails_with_op_sl(self):
- self._do_extra_specs_ops_test(
- value='2',
- req='s< 12',
- matches=False)
-
- def test_extra_specs_fails_with_op_sg(self):
- self._do_extra_specs_ops_test(
- value='12',
- req='s> 2',
- matches=False)
-
- def test_extra_specs_matches_with_op_in(self):
- self._do_extra_specs_ops_test(
- value='12311321',
- req='<in> 11',
- matches=True)
-
- def test_extra_specs_matches_with_op_in2(self):
- self._do_extra_specs_ops_test(
- value='12311321',
- req='<in> 12311321',
- matches=True)
-
- def test_extra_specs_matches_with_op_in3(self):
- self._do_extra_specs_ops_test(
- value='12311321',
- req='<in> 12311321 <in>',
- matches=True)
-
- def test_extra_specs_fails_with_op_in(self):
- self._do_extra_specs_ops_test(
- value='12310321',
- req='<in> 11',
- matches=False)
-
- def test_extra_specs_fails_with_op_in2(self):
- self._do_extra_specs_ops_test(
- value='12310321',
- req='<in> 11 <in>',
- matches=False)
-
- def test_extra_specs_matches_with_op_or(self):
- self._do_extra_specs_ops_test(
- value='12',
- req='<or> 11 <or> 12',
- matches=True)
-
- def test_extra_specs_matches_with_op_or2(self):
- self._do_extra_specs_ops_test(
- value='12',
- req='<or> 11 <or> 12 <or>',
- matches=True)
-
- def test_extra_specs_fails_with_op_or(self):
- self._do_extra_specs_ops_test(
- value='13',
- req='<or> 11 <or> 12',
- matches=False)
-
- def test_extra_specs_fails_with_op_or2(self):
- self._do_extra_specs_ops_test(
- value='13',
- req='<or> 11 <or> 12 <or>',
- matches=False)
-
- def test_extra_specs_matches_with_op_le(self):
- self._do_extra_specs_ops_test(
- value='2',
- req='<= 10',
- matches=True)
-
- def test_extra_specs_fails_with_op_le(self):
- self._do_extra_specs_ops_test(
- value='3',
- req='<= 2',
- matches=False)
-
- def test_extra_specs_matches_with_op_ge(self):
- self._do_extra_specs_ops_test(
- value='3',
- req='>= 1',
- matches=True)
-
- def test_extra_specs_fails_with_op_ge(self):
- self._do_extra_specs_ops_test(
- value='2',
- req='>= 3',
- matches=False)
-
-
class HostFiltersTestCase(test.NoDBTestCase):
"""Test case for host filters."""
# FIXME(sirp): These tests still require DB access until we can separate
@@ -287,171 +97,6 @@ class HostFiltersTestCase(test.NoDBTestCase):
return ret_value
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
- def test_affinity_different_filter_passes(self):
- filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host2'})
- instance_uuid = instance.uuid
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'different_host': [instance_uuid], }}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_different_filter_no_list_passes(self):
- filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host2'})
- instance_uuid = instance.uuid
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'different_host': instance_uuid}}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_different_filter_fails(self):
- filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host1'})
- instance_uuid = instance.uuid
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'different_host': [instance_uuid], }}
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_different_filter_handles_none(self):
- filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': None}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_different_filter_handles_deleted_instance(self):
- filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host1'})
- instance_uuid = instance.uuid
- db.instance_destroy(self.context, instance_uuid)
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'different_host': [instance_uuid], }}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_same_filter_no_list_passes(self):
- filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host1'})
- instance_uuid = instance.uuid
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'same_host': instance_uuid}}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_same_filter_passes(self):
- filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host1'})
- instance_uuid = instance.uuid
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'same_host': [instance_uuid], }}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_same_filter_fails(self):
- filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host2'})
- instance_uuid = instance.uuid
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'same_host': [instance_uuid], }}
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_same_filter_handles_none(self):
- filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': None}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_same_filter_handles_deleted_instance(self):
- filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host1'})
- instance_uuid = instance.uuid
- db.instance_destroy(self.context, instance_uuid)
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'same_host': [instance_uuid], }}
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_simple_cidr_filter_passes(self):
- filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- host.host_ip = '10.8.1.1'
-
- affinity_ip = "10.8.1.100"
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'cidr': '/24',
- 'build_near_host_ip': affinity_ip}}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_simple_cidr_filter_fails(self):
- filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
- host.host_ip = '10.8.1.1'
-
- affinity_ip = "10.8.1.100"
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': {
- 'cidr': '/32',
- 'build_near_host_ip': affinity_ip}}
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_simple_cidr_filter_handles_none(self):
- filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'node1', {})
-
- affinity_ip = CONF.my_ip.split('.')[0:3]
- affinity_ip.append('100')
- affinity_ip = str.join('.', affinity_ip)
-
- filter_properties = {'context': self.context.elevated(),
- 'scheduler_hints': None}
-
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
def test_compute_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
@@ -506,146 +151,6 @@ class HostFiltersTestCase(test.NoDBTestCase):
# False since type matches aggregate, metadata
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
- def test_ram_filter_fails_on_memory(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['RamFilter']()
- ram_filter.RamFilter.ram_allocation_ratio = 1.0
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
- 'service': service})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_ram_filter_passes(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['RamFilter']()
- ram_filter.RamFilter.ram_allocation_ratio = 1.0
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_ram_filter_oversubscribe(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['RamFilter']()
- ram_filter.RamFilter.ram_allocation_ratio = 2.0
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
-
- def test_aggregate_ram_filter_value_error(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['AggregateRamFilter']()
- self.flags(ram_allocation_ratio=1.0)
- filter_properties = {'context': self.context,
- 'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
- 'service': service})
- self._create_aggregate_with_host(name='fake_aggregate',
- hosts=['host1'],
- metadata={'ram_allocation_ratio': 'XXX'})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
-
- def test_aggregate_ram_filter_default_value(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['AggregateRamFilter']()
- self.flags(ram_allocation_ratio=1.0)
- filter_properties = {'context': self.context,
- 'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
- 'service': service})
- # False: fallback to default flag w/o aggregates
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
- self._create_aggregate_with_host(name='fake_aggregate',
- hosts=['host1'],
- metadata={'ram_allocation_ratio': '2.0'})
- # True: use ratio from aggregates
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
-
- def test_aggregate_ram_filter_conflict_values(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['AggregateRamFilter']()
- self.flags(ram_allocation_ratio=1.0)
- filter_properties = {'context': self.context,
- 'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
- 'service': service})
- self._create_aggregate_with_host(name='fake_aggregate1',
- hosts=['host1'],
- metadata={'ram_allocation_ratio': '1.5'})
- self._create_aggregate_with_host(name='fake_aggregate2',
- hosts=['host1'],
- metadata={'ram_allocation_ratio': '2.0'})
- # use the minimum ratio from aggregates
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
-
- def test_disk_filter_passes(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['DiskFilter']()
- self.flags(disk_allocation_ratio=1.0)
- filter_properties = {'instance_type': {'root_gb': 1,
- 'ephemeral_gb': 1, 'swap': 512}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_disk_filter_fails(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['DiskFilter']()
- self.flags(disk_allocation_ratio=1.0)
- filter_properties = {'instance_type': {'root_gb': 10,
- 'ephemeral_gb': 1, 'swap': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
- 'service': service})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_disk_filter_oversubscribe(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['DiskFilter']()
- self.flags(disk_allocation_ratio=10.0)
- filter_properties = {'instance_type': {'root_gb': 100,
- 'ephemeral_gb': 18, 'swap': 1024}}
- service = {'disabled': False}
- # 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- self.assertEqual(12 * 10.0, host.limits['disk_gb'])
-
- def test_disk_filter_oversubscribe_fail(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['DiskFilter']()
- self.flags(disk_allocation_ratio=10.0)
- filter_properties = {'instance_type': {'root_gb': 100,
- 'ephemeral_gb': 19, 'swap': 1024}}
- service = {'disabled': False}
- # 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
- 'service': service})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
def _test_compute_filter_fails_on_service_disabled(self,
reason=None):
self._stub_service_is_up(True)
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
index bf5264d2ca..6b1617047c 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/test_exception.py
@@ -16,6 +16,8 @@
import inspect
+import six
+
from nova import context
from nova import exception
from nova import test
@@ -67,10 +69,10 @@ class NovaExceptionTestCase(test.NoDBTestCase):
msg_fmt = "default message"
exc = FakeNovaException()
- self.assertEqual(unicode(exc), 'default message')
+ self.assertEqual(six.text_type(exc), 'default message')
def test_error_msg(self):
- self.assertEqual(unicode(exception.NovaException('test')),
+ self.assertEqual(six.text_type(exception.NovaException('test')),
'test')
def test_default_error_msg_with_kwargs(self):
@@ -78,7 +80,7 @@ class NovaExceptionTestCase(test.NoDBTestCase):
msg_fmt = "default message: %(code)s"
exc = FakeNovaException(code=500)
- self.assertEqual(unicode(exc), 'default message: 500')
+ self.assertEqual(six.text_type(exc), 'default message: 500')
self.assertEqual(exc.message, 'default message: 500')
def test_error_msg_exception_with_kwargs(self):
@@ -86,7 +88,7 @@ class NovaExceptionTestCase(test.NoDBTestCase):
msg_fmt = "default message: %(misspelled_code)s"
exc = FakeNovaException(code=500, misspelled_code='blah')
- self.assertEqual(unicode(exc), 'default message: blah')
+ self.assertEqual(six.text_type(exc), 'default message: blah')
self.assertEqual(exc.message, 'default message: blah')
def test_default_error_code(self):
@@ -115,7 +117,7 @@ class NovaExceptionTestCase(test.NoDBTestCase):
msg_fmt = "some message"
exc = FakeNovaException()
- self.assertEqual(unicode(exc), exc.format_message())
+ self.assertEqual(six.text_type(exc), exc.format_message())
def test_format_message_remote(self):
class FakeNovaException_Remote(exception.NovaException):
@@ -125,7 +127,7 @@ class NovaExceptionTestCase(test.NoDBTestCase):
return u"print the whole trace"
exc = FakeNovaException_Remote()
- self.assertEqual(unicode(exc), u"print the whole trace")
+ self.assertEqual(six.text_type(exc), u"print the whole trace")
self.assertEqual(exc.format_message(), "some message")
def test_format_message_remote_error(self):
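Note: six.text_type resolves to unicode on Python 2 and str on Python 3, so the assertions above behave the same under either interpreter. A minimal standalone illustration (not part of the change itself):

    import six

    exc = ValueError('boom')
    # unicode(exc) on Python 2, str(exc) on Python 3 -- both give u'boom'
    assert six.text_type(exc) == u'boom'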
diff --git a/nova/tests/test_hacking.py b/nova/tests/test_hacking.py
index e39e53f96c..69089c0cd3 100644
--- a/nova/tests/test_hacking.py
+++ b/nova/tests/test_hacking.py
@@ -300,9 +300,9 @@ class HackingTestCase(test.NoDBTestCase):
self._assert_has_errors(code, checker, expected_errors=errors,
filename='nova/tests/test_assert.py')
- def test_str_exception(self):
+ def test_str_unicode_exception(self):
- checker = checks.CheckForStrExc
+ checker = checks.CheckForStrUnicodeExc
code = """
def f(a, b):
try:
@@ -317,12 +317,23 @@ class HackingTestCase(test.NoDBTestCase):
code = """
def f(a, b):
try:
+ p = unicode(a) + str(b)
+ except ValueError as e:
+ p = e
+ return p
+ """
+ errors = []
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ code = """
+ def f(a, b):
+ try:
p = str(a) + str(b)
except ValueError as e:
p = unicode(e)
return p
"""
- errors = []
+ errors = [(5, 20, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
@@ -334,12 +345,27 @@ class HackingTestCase(test.NoDBTestCase):
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + str(ve)
- p = unicode(e)
+ p = e
return p
"""
errors = [(8, 20, 'N325'), (8, 29, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
+ code = """
+ def f(a, b):
+ try:
+ p = str(a) + str(b)
+ except ValueError as e:
+ try:
+ p = unicode(a) + unicode(b)
+ except ValueError as ve:
+ p = str(e) + unicode(ve)
+ p = str(e)
+ return p
+ """
+ errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
def test_trans_add(self):
checker = checks.CheckForTransAdd
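Note: the N325 check (CheckForStrUnicodeExc) flags str()/unicode() applied to a name bound by an except clause, while leaving other uses alone. A minimal sketch consistent with the test cases above (names are illustrative):

    try:
        value = int('not-a-number')
    except ValueError as e:
        bad = unicode(e)   # flagged as N325
        ok = e             # not flagged: the exception object is used as-is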
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index b0ce27b25a..e38f833298 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -789,6 +789,28 @@ class MetadataHandlerTestCase(test.TestCase):
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 500)
+ def test_get_metadata(self):
+ def _test_metadata_path(relpath):
+            # recursively confirm an HTTP 200 from all meta-data elements
+ # available at relpath.
+ response = fake_request(self.stubs, self.mdinst,
+ relpath=relpath)
+ for item in response.body.split('\n'):
+ if 'public-keys' in relpath:
+ # meta-data/public-keys/0=keyname refers to
+ # meta-data/public-keys/0
+ item = item.split('=')[0]
+ if item.endswith('/'):
+ path = relpath + '/' + item
+ _test_metadata_path(path)
+ continue
+
+ path = relpath + '/' + item
+ response = fake_request(self.stubs, self.mdinst, relpath=path)
+ self.assertEqual(response.status_int, 200, message=path)
+
+ _test_metadata_path('/2009-04-04/meta-data')
+
class MetadataPasswordTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py
index 2f190fccb7..f3ab5f38a4 100644
--- a/nova/tests/virt/libvirt/test_config.py
+++ b/nova/tests/virt/libvirt/test_config.py
@@ -1862,8 +1862,8 @@ class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
self.assertEqual(obj.fun_capability[0].device_addrs,
- [("0000", "0x0a", "0x1", "0x1"),
- ("0001", "0x0a", "0x02", "0x03"), ])
+ [(0, 10, 1, 1),
+ (1, 10, 2, 3), ])
def test_config_device_pci_2cap(self):
xmlin = """
@@ -1898,11 +1898,11 @@ class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
self.assertEqual(obj.fun_capability[0].device_addrs,
- [("0000", '0x0a', '0x1', "0x1"),
- ("0001", "0x0a", "0x02", "0x03"), ])
+ [(0, 10, 1, 1),
+ (1, 10, 2, 3), ])
self.assertEqual(obj.fun_capability[1].type, 'phys_function')
self.assertEqual(obj.fun_capability[1].device_addrs,
- [("0000", '0x0a', '0x1', "0x1"), ])
+ [(0, 10, 1, 1), ])
def test_config_read_only_disk(self):
obj = config.LibvirtConfigGuestDisk()
@@ -1941,8 +1941,8 @@ class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
fun_capability = config.LibvirtConfigNodeDevicePciSubFunctionCap()
fun_capability.parse_str(xmlin)
self.assertEqual('virt_functions', fun_capability.type)
- self.assertEqual([("0000", "0x0a", "0x1", "0x1"),
- ("0001", "0x0a", "0x02", "0x03"), ],
+ self.assertEqual([(0, 10, 1, 1),
+ (1, 10, 2, 3)],
fun_capability.device_addrs)
diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py
index 5d2c6e0df3..d658ee5f3e 100644
--- a/nova/tests/virt/libvirt/test_driver.py
+++ b/nova/tests/virt/libvirt/test_driver.py
@@ -57,7 +57,7 @@ from nova.openstack.common import lockutils
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import uuidutils
-from nova.pci import pci_manager
+from nova.pci import manager as pci_manager
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
@@ -172,6 +172,8 @@ class FakeVirtDomain(object):
self.uuidstr = uuidstr
self.id = id
self.domname = name
+ self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
+ None, None]
if fake_xml:
self._fake_dom_xml = fake_xml
else:
@@ -195,8 +197,7 @@ class FakeVirtDomain(object):
return self.id
def info(self):
- return [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
- None, None]
+ return self._info
def create(self):
pass
@@ -237,6 +238,9 @@ class FakeVirtDomain(object):
def resume(self):
pass
+ def destroy(self):
+ pass
+
class CacheConcurrencyTestCase(test.NoDBTestCase):
def setUp(self):
@@ -611,6 +615,20 @@ class LibvirtConnTestCase(test.NoDBTestCase):
inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertPublicAPISignatures(baseinst, inst)
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_ok(self, mock_version):
+ mock_version.return_value = True
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_abort(self, mock_version):
+ mock_version.return_value = False
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertRaises(exception.NovaException,
+ drvr.init_host,
+ "dummyhost")
+
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_disable(self, mock_svc):
# Tests disabling an enabled host.
@@ -4889,18 +4907,22 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume, None,
{"driver_volume_type": "badtype"},
- {"name": "fake-instance"},
+ instance,
"/dev/sda")
def test_attach_blockio_invalid_hypervisor(self):
self.flags(virt_type='fake_type', group='libvirt')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidHypervisorType,
@@ -4909,7 +4931,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
- {"name": "fake-instance"},
+ instance,
"/dev/sda")
def test_attach_blockio_invalid_version(self):
@@ -4918,6 +4940,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.flags(virt_type='qemu', group='libvirt')
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
@@ -4927,7 +4951,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
"data": {"logical_block_size": "4096",
"physical_block_size": "4096"}
},
- {"name": "fake-instance"},
+ instance,
"/dev/sda")
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
@@ -7323,6 +7347,56 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(self.context, instance, [], None, False)
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
+ def test_destroy_lxc_calls_teardown_container(self, mock_look_up,
+ mock_teardown_container,
+ mock_cleanup):
+ self.flags(virt_type='lxc', group='libvirt')
+ fake_domain = FakeVirtDomain()
+
+ def destroy_side_effect(*args, **kwargs):
+ fake_domain._info[0] = power_state.SHUTDOWN
+
+ with mock.patch.object(fake_domain, 'destroy',
+ side_effect=destroy_side_effect) as mock_domain_destroy:
+ mock_look_up.return_value = fake_domain
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = []
+ conn.destroy(self.context, instance, network_info, None, False)
+
+ mock_look_up.assert_has_calls([mock.call(instance.name),
+ mock.call(instance.name)])
+ mock_domain_destroy.assert_called_once_with()
+ mock_teardown_container.assert_called_once_with(instance)
+ mock_cleanup.assert_called_once_with(self.context, instance,
+ network_info, None, False,
+ None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
+ def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
+ mock_look_up, mock_teardown_container, mock_cleanup):
+ self.flags(virt_type='lxc', group='libvirt')
+ instance = fake_instance.fake_instance_obj(self.context)
+ inf_exception = exception.InstanceNotFound(instance_id=instance.name)
+ mock_look_up.side_effect = inf_exception
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = []
+ conn.destroy(self.context, instance, network_info, None, False)
+
+ mock_look_up.assert_has_calls([mock.call(instance.name),
+ mock.call(instance.name)])
+ mock_teardown_container.assert_called_once_with(instance)
+ mock_cleanup.assert_called_once_with(self.context, instance,
+ network_info, None, False,
+ None)
+
def test_reboot_different_ids(self):
class FakeLoopingCall:
def start(self, *a, **k):
@@ -7449,8 +7523,10 @@ class LibvirtConnTestCase(test.NoDBTestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ instance_details = {"name": "instancename", "id": 1,
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ instance = fake_instance.fake_instance_obj(
+ self.context, **instance_details)
network_info = _fake_network_info(self.stubs, 1)
conn.resume_state_on_host_boot(self.context, instance, network_info,
@@ -7485,7 +7561,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(self):
called = {'count': 0}
- instance = {'name': 'test'}
+ instance_details = {'name': 'test'}
+ instance = fake_instance.fake_instance_obj(
+ self.context, **instance_details)
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id='fake')
@@ -7560,7 +7638,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
block_device_info)
@mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
- @mock.patch('nova.pci.pci_manager.get_instance_pci_devs')
+ @mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@@ -10394,8 +10472,11 @@ class HostStateTestCase(test.NoDBTestCase):
hardware.VirtNUMATopologyCellUsage(
2, set([3, 4]), 1024)])
- class FakeConnection(object):
+ class FakeConnection(libvirt_driver.LibvirtDriver):
"""Fake connection object."""
+ def __init__(self):
+ super(HostStateTestCase.FakeConnection,
+ self).__init__(fake.FakeVirtAPI(), True)
def _get_vcpu_total(self):
return 1
@@ -10444,8 +10525,9 @@ class HostStateTestCase(test.NoDBTestCase):
return HostStateTestCase.numa_topology
def test_update_status(self):
- hs = libvirt_driver.HostState(self.FakeConnection())
- stats = hs._stats
+ drvr = HostStateTestCase.FakeConnection()
+
+ stats = drvr.get_available_resource("compute1")
self.assertEqual(stats["vcpus"], 1)
self.assertEqual(stats["memory_mb"], 497)
self.assertEqual(stats["local_gb"], 100)
diff --git a/nova/tests/virt/vmwareapi/stubs.py b/nova/tests/virt/vmwareapi/stubs.py
index f14505b81e..fb207176cc 100644
--- a/nova/tests/virt/vmwareapi/stubs.py
+++ b/nova/tests/virt/vmwareapi/stubs.py
@@ -26,8 +26,8 @@ from nova import db
from nova.tests import test_flavors
from nova.tests.virt.vmwareapi import fake
from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import network_util
-from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
@@ -86,8 +86,8 @@ def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
- stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
- stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
+ stubs.Set(images, 'upload_image', fake.fake_upload_image)
+ stubs.Set(images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(driver.VMwareAPISession, "vim", fake_vim_prop)
stubs.Set(driver.VMwareAPISession, "_is_vim_object",
fake_is_vim_object)
diff --git a/nova/tests/virt/vmwareapi/test_configdrive.py b/nova/tests/virt/vmwareapi/test_configdrive.py
index 0b408e5375..8df4121f44 100644
--- a/nova/tests/virt/vmwareapi/test_configdrive.py
+++ b/nova/tests/virt/vmwareapi/test_configdrive.py
@@ -27,9 +27,9 @@ from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.virt.vmwareapi import stubs
from nova.virt import fake
from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
-from nova.virt.vmwareapi import vmware_images
class ConfigDriveTestCase(test.NoDBTestCase):
@@ -108,7 +108,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
pass
- self.stubs.Set(vmware_images,
+ self.stubs.Set(images,
'upload_iso_to_datastore',
fake_upload_iso_to_datastore)
diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py
index 64139c7337..f2cb36fce9 100644
--- a/nova/tests/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/virt/vmwareapi/test_driver_api.py
@@ -62,11 +62,11 @@ from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
-from nova.virt.vmwareapi import vmware_images
from nova.virt.vmwareapi import volumeops
CONF = cfg.CONF
@@ -324,7 +324,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def test_configuration_linked_clone(self):
self.flags(use_linked_clone=None, group='vmware')
- self.assertRaises(error_util.UseLinkedCloneConfigurationFault,
+ self.assertRaises(vexc.UseLinkedCloneConfigurationFault,
self.conn._validate_configuration)
@mock.patch.object(pbm, 'get_profile_id_by_name')
@@ -528,13 +528,13 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
else:
self.assertFalse(vmwareapi_fake.get_file(str(cache)))
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_instance_dir_disk_created(self, mock_from_image):
"""Test image file is cached when even when use_linked_clone
is False
"""
- img_props = vmware_images.VMwareImage(
+ img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
linked_clone=False)
@@ -544,13 +544,13 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.assertTrue(vmwareapi_fake.get_file(str(path)))
self._cached_files_exist()
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_cache_dir_disk_created(self, mock_from_image):
"""Test image disk is cached when use_linked_clone is True."""
self.flags(use_linked_clone=True, group='vmware')
- img_props = vmware_images.VMwareImage(
+ img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=1 * units.Ki,
disk_type=constants.DISK_TYPE_SPARSE)
@@ -599,11 +599,11 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.image['disk_format'] = 'iso'
self._create_vm()
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_iso_disk_cdrom_attach_with_config_drive(self,
mock_from_image):
- img_props = vmware_images.VMwareImage(
+ img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=80 * units.Gi,
file_type='iso',
@@ -781,10 +781,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self._check_vm_info(info, power_state.RUNNING)
self.assertTrue(vmwareapi_fake.get_file(str(root)))
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_extend_sparse(self, mock_from_image):
- img_props = vmware_images.VMwareImage(
+ img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=units.Ki,
disk_type=constants.DISK_TYPE_SPARSE,
@@ -916,10 +916,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.assertRaises(DeleteError, self._create_vm)
self.assertTrue(vmwareapi_fake.get_file(cached_image))
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_invalid_disk_size(self, mock_from_image):
- img_props = vmware_images.VMwareImage(
+ img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=82 * units.Gi,
disk_type=constants.DISK_TYPE_SPARSE,
@@ -930,10 +930,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.assertRaises(exception.InstanceUnacceptable,
self._create_vm)
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
- img_props = vmware_images.VMwareImage(
+ img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=1024,
disk_type=constants.DISK_TYPE_SPARSE,
@@ -1185,7 +1185,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
- with mock.patch.object(vmware_images, 'upload_image',
+ with mock.patch.object(images, 'upload_image',
self.mock_upload_image):
self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
func_call_matcher.call)
@@ -2216,10 +2216,10 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
network_info=self.network_info,
block_device_info=None)
- @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_with_sparse_image(self, mock_from_image):
- img_info = vmware_images.VMwareImage(
+ img_info = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=1024,
disk_type=constants.DISK_TYPE_SPARSE,
diff --git a/nova/tests/virt/vmwareapi/test_vmware_images.py b/nova/tests/virt/vmwareapi/test_images.py
index fb27e9fc31..613705cf78 100644
--- a/nova/tests/virt/vmwareapi/test_vmware_images.py
+++ b/nova/tests/virt/vmwareapi/test_images.py
@@ -12,7 +12,7 @@
# under the License.
"""
-Test suite for vmware_images.
+Test suite for images.
"""
import contextlib
@@ -24,8 +24,8 @@ from nova import exception
from nova import test
import nova.tests.image.fake
from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import read_write_util
-from nova.virt.vmwareapi import vmware_images
class VMwareImagesTestCase(test.NoDBTestCase):
@@ -64,16 +64,16 @@ class VMwareImagesTestCase(test.NoDBTestCase):
side_effect=fake_read_handle),
mock.patch.object(read_write_util, 'VMwareHTTPWriteFile',
side_effect=fake_write_handle),
- mock.patch.object(vmware_images, 'start_transfer'),
- mock.patch.object(vmware_images.IMAGE_API, 'get',
+ mock.patch.object(images, 'start_transfer'),
+ mock.patch.object(images.IMAGE_API, 'get',
return_value=image_data),
- mock.patch.object(vmware_images.IMAGE_API, 'download',
+ mock.patch.object(images.IMAGE_API, 'download',
return_value=read_iter),
) as (glance_read, http_write, start_transfer, image_show,
image_download):
- vmware_images.fetch_image(context, instance,
- host, dc_name,
- ds_name, file_path)
+ images.fetch_image(context, instance,
+ host, dc_name,
+ ds_name, file_path)
glance_read.assert_called_once_with(read_iter)
http_write.assert_called_once_with(host, dc_name, ds_name, None,
@@ -103,9 +103,9 @@ class VMwareImagesTestCase(test.NoDBTestCase):
"vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
"vmware_disktype": constants.DEFAULT_DISK_TYPE,
"hw_vif_model": constants.DEFAULT_VIF_MODEL,
- vmware_images.LINKED_CLONE_PROPERTY: True}}
+ images.LINKED_CLONE_PROPERTY: True}}
- img_props = vmware_images.VMwareImage.from_image(image_id, mdata)
+ img_props = images.VMwareImage.from_image(image_id, mdata)
image_size_in_kb = raw_disk_size_in_bytes / units.Ki
@@ -139,9 +139,9 @@ class VMwareImagesTestCase(test.NoDBTestCase):
if image_lc_setting is not None:
mdata['properties'][
- vmware_images.LINKED_CLONE_PROPERTY] = image_lc_setting
+ images.LINKED_CLONE_PROPERTY] = image_lc_setting
- return vmware_images.VMwareImage.from_image(image_id, mdata)
+ return images.VMwareImage.from_image(image_id, mdata)
def test_use_linked_clone_override_nf(self):
image_props = self._image_build(None, False)
@@ -205,7 +205,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
self.assertFalse(image.linked_clone)
def test_image_defaults(self):
- image = vmware_images.VMwareImage(image_id='fake-image-id')
+ image = images.VMwareImage(image_id='fake-image-id')
        # N.B. We intentionally don't use the defined constants here. Amongst
# other potential failures, we're interested in changes to their
diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py
index 1f995f7f0d..ac095f9c3b 100644
--- a/nova/tests/virt/vmwareapi/test_vm_util.py
+++ b/nova/tests/virt/vmwareapi/test_vm_util.py
@@ -41,10 +41,6 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
fake.reset()
vm_util.vm_refs_cache_reset()
- def tearDown(self):
- super(VMwareVMUtilTestCase, self).tearDown()
- fake.reset()
-
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py
index f09085071c..150ce7befd 100644
--- a/nova/tests/virt/vmwareapi/test_vmops.py
+++ b/nova/tests/virt/vmwareapi/test_vmops.py
@@ -33,10 +33,10 @@ from nova.tests.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
-from nova.virt.vmwareapi import vmware_images
class DsPathMatcher:
@@ -605,7 +605,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_sized_image_exists,
flavor_fits_image=False):
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
- image_info = vmware_images.VMwareImage(
+ image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
@@ -655,7 +655,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_extend_virtual_disk,
flavor_fits_image=False):
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
- image_info = vmware_images.VMwareImage(
+ image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
@@ -699,7 +699,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_create_virtual_disk,
mock_attach_cdrom,
with_root_disk):
- image_info = vmware_images.VMwareImage(
+ image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
linked_clone=True)
@@ -821,7 +821,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_call_method),
mock.patch.object(uuidutils, 'generate_uuid',
return_value='tmp-uuid'),
- mock.patch.object(vmware_images, 'fetch_image')
+ mock.patch.object(images, 'fetch_image')
) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image):
self._vmops.spawn(self._context, self._instance, image,
injected_files='fake_files',
@@ -927,7 +927,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_get_datastore,
image_size_bytes=0,
instance_name=None):
- image_info = vmware_images.VMwareImage(
+ image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size_bytes,
linked_clone=True)
@@ -992,7 +992,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_build_virtual_machine(self):
image_id = nova.tests.image.fake.get_valid_image_id()
- image = vmware_images.VMwareImage(image_id=image_id)
+ image = images.VMwareImage(image_id=image_id)
vm_ref = self._vmops.build_virtual_machine(self._instance,
'fake-instance-name',
@@ -1093,7 +1093,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
file_type = (constants.DISK_FORMAT_ISO if is_iso
else constants.DEFAULT_DISK_FORMAT)
- image_info = vmware_images.VMwareImage(
+ image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
file_type=file_type,
@@ -1165,7 +1165,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
self._test_fetch_image_if_missing(
is_iso=True)
- @mock.patch.object(vmware_images, 'fetch_image')
+ @mock.patch.object(images, 'fetch_image')
def test_fetch_image_as_file(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py
index 62cd9ca486..e6bd462e9b 100644
--- a/nova/tests/virt/xenapi/test_vmops.py
+++ b/nova/tests/virt/xenapi/test_vmops.py
@@ -21,7 +21,7 @@ from nova.compute import task_states
from nova import context
from nova import exception
from nova import objects
-from nova.pci import pci_manager
+from nova.pci import manager as pci_manager
from nova import test
from nova.tests import fake_instance
from nova.tests.virt.xenapi import stubs
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index b54d6c4177..1586ef471e 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -31,7 +31,7 @@ import six
from nova import exception
from nova.openstack.common import log as logging
-from nova.pci import pci_utils
+from nova.pci import utils as pci_utils
from nova.virt import hardware
@@ -1848,10 +1848,10 @@ class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject):
self.type = xmldoc.get("type")
for c in xmldoc.getchildren():
if c.tag == "address":
- self.device_addrs.append((c.get('domain'),
- c.get('bus'),
- c.get('slot'),
- c.get('function')))
+ self.device_addrs.append((int(c.get('domain'), 16),
+ int(c.get('bus'), 16),
+ int(c.get('slot'), 16),
+ int(c.get('function'), 16)))
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
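Note: the parser above now yields integer 4-tuples; the driver.py hunk further down re-renders them with a "%04x:%02x:%02x.%01x" format. A small standalone round-trip check using a hypothetical address:

    # Hypothetical <address> attributes as libvirt reports them
    attrs = {'domain': '0x0000', 'bus': '0x0a', 'slot': '0x1', 'function': '0x1'}
    parsed = tuple(int(attrs[k], 16)
                   for k in ('domain', 'bus', 'slot', 'function'))
    assert parsed == (0, 10, 1, 1)
    # Rendered back into the phys_function PCI address string
    assert '%04x:%02x:%02x.%01x' % parsed == '0000:0a:01.1'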
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
index 8ed55a2564..879db744c7 100644
--- a/nova/virt/libvirt/designer.py
+++ b/nova/virt/libvirt/designer.py
@@ -19,7 +19,7 @@ This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
-from nova.pci import pci_utils
+from nova.pci import utils as pci_utils
def set_vif_guest_frontend_config(conf, mac, model, driver):
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index ca1cb7036d..dbdbfd08f1 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -81,9 +81,9 @@ from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
-from nova.pci import pci_manager
-from nova.pci import pci_utils
-from nova.pci import pci_whitelist
+from nova.pci import manager as pci_manager
+from nova.pci import utils as pci_utils
+from nova.pci import whitelist as pci_whitelist
from nova import rpc
from nova import utils
from nova import version
@@ -383,7 +383,6 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt = importutils.import_module('libvirt')
self._skip_list_all_domains = False
- self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
@@ -467,12 +466,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
- @property
- def host_state(self):
- if not self._host_state:
- self._host_state = HostState(self)
- return self._host_state
-
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
@@ -714,9 +707,10 @@ class LibvirtDriver(driver.ComputeDriver):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
- LOG.error(_LE('Nova requires libvirt version '
- '%(major)i.%(minor)i.%(micro)i or greater.'),
- {'major': major, 'minor': minor, 'micro': micro})
+ raise exception.NovaException(
+ _('Nova requires libvirt version '
+ '%(major)i.%(minor)i.%(micro)i or greater.') %
+ {'major': major, 'minor': minor, 'micro': micro})
self._init_events()
@@ -979,10 +973,6 @@ class LibvirtDriver(driver.ComputeDriver):
old_domid = virt_dom.ID()
virt_dom.destroy()
- # NOTE(GuanQiang): teardown container to avoid resource leak
- if CONF.libvirt.virt_type == 'lxc':
- self._teardown_container(instance)
-
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
@@ -1049,6 +1039,10 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_LI("Going to destroy instance again."),
instance=instance)
self._destroy(instance)
+ else:
+ # NOTE(GuanQiang): teardown container to avoid resource leak
+ if CONF.libvirt.virt_type == 'lxc':
+ self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
@@ -1329,7 +1323,7 @@ class LibvirtDriver(driver.ComputeDriver):
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
- instance_name = instance['name']
+ instance_name = instance.name
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
@@ -1489,7 +1483,7 @@ class LibvirtDriver(driver.ComputeDriver):
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
- instance_name = instance['name']
+ instance_name = instance.name
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
@@ -2470,7 +2464,7 @@ class LibvirtDriver(driver.ComputeDriver):
def suspend(self, instance):
"""Suspend the specified instance."""
- dom = self._lookup_by_name(instance['name'])
+ dom = self._lookup_by_name(instance.name)
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(instance, dom)
@@ -2493,7 +2487,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Check if the instance is running already and avoid doing
# anything if it is.
try:
- domain = self._lookup_by_name(instance['name'])
+ domain = self._lookup_by_name(instance.name)
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
@@ -4728,11 +4722,11 @@ class LibvirtDriver(driver.ComputeDriver):
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
- phys_address = "%s:%s:%s.%s" % (
- fun_cap.device_addrs[0][0].replace("0x", ''),
- fun_cap.device_addrs[0][1].replace("0x", ''),
- fun_cap.device_addrs[0][2].replace("0x", ''),
- fun_cap.device_addrs[0][3].replace("0x", ''))
+ phys_address = "%04x:%02x:%02x.%01x" % (
+ fun_cap.device_addrs[0][0],
+ fun_cap.device_addrs[0][1],
+ fun_cap.device_addrs[0][2],
+ fun_cap.device_addrs[0][3])
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
@@ -4916,12 +4910,45 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: dictionary containing resource info
"""
- # Temporary: convert supported_instances into a string, while keeping
+ disk_info_dict = self._get_local_gb_info()
+ data = {}
+
+ # NOTE(dprince): calling capabilities before getVersion works around
+ # an initialization issue with some versions of Libvirt (1.0.5.5).
+ # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
+ # See: https://bugs.launchpad.net/nova/+bug/1215593
+
+    # Temporarily convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
- stats = self.host_state.get_host_stats(True)
- stats['supported_instances'] = jsonutils.dumps(
- stats['supported_instances'])
- return stats
+ data["supported_instances"] = jsonutils.dumps(
+ self._get_instance_capabilities())
+
+ data["vcpus"] = self._get_vcpu_total()
+ data["memory_mb"] = self._get_memory_mb_total()
+ data["local_gb"] = disk_info_dict['total']
+ data["vcpus_used"] = self._get_vcpu_used()
+ data["memory_mb_used"] = self._get_memory_mb_used()
+ data["local_gb_used"] = disk_info_dict['used']
+ data["hypervisor_type"] = self._get_hypervisor_type()
+ data["hypervisor_version"] = self._get_hypervisor_version()
+ data["hypervisor_hostname"] = self._get_hypervisor_hostname()
+ data["cpu_info"] = self._get_cpu_info()
+
+ disk_free_gb = disk_info_dict['free']
+ disk_over_committed = self._get_disk_over_committed_size_total()
+ available_least = disk_free_gb * units.Gi - disk_over_committed
+ data['disk_available_least'] = available_least / units.Gi
+
+ data['pci_passthrough_devices'] = \
+ self._get_pci_passthrough_devices()
+
+ numa_topology = self._get_host_numa_topology()
+ if numa_topology:
+ data['numa_topology'] = numa_topology.to_json()
+ else:
+ data['numa_topology'] = None
+
+ return data
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
@@ -5148,8 +5175,7 @@ class LibvirtDriver(driver.ComputeDriver):
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
- ret = unicode(e)
- LOG.error(m, {'ret': ret, 'u': u})
+ LOG.error(m, {'ret': e, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
@@ -6315,76 +6341,3 @@ class LibvirtDriver(driver.ComputeDriver):
def is_supported_fs_format(self, fs_type):
return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
-
-
-class HostState(object):
- """Manages information about the compute node through libvirt."""
- def __init__(self, driver):
- super(HostState, self).__init__()
- self._stats = {}
- self.driver = driver
- self.update_status()
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host.
-
- If 'refresh' is True, run update the stats first.
- """
- if refresh or not self._stats:
- self.update_status()
- return self._stats
-
- def update_status(self):
- """Retrieve status info from libvirt."""
- def _get_disk_available_least():
- """Return total real disk available least size.
-
- The size of available disk, when block_migration command given
- disk_over_commit param is FALSE.
-
- The size that deducted real instance disk size from the total size
- of the virtual disk of all instances.
-
- """
- disk_free_gb = disk_info_dict['free']
- disk_over_committed = (self.driver.
- _get_disk_over_committed_size_total())
- # Disk available least size
- available_least = disk_free_gb * units.Gi - disk_over_committed
- return (available_least / units.Gi)
-
- LOG.debug("Updating host stats")
- disk_info_dict = self.driver._get_local_gb_info()
- data = {}
-
- # NOTE(dprince): calling capabilities before getVersion works around
- # an initialization issue with some versions of Libvirt (1.0.5.5).
- # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
- # See: https://bugs.launchpad.net/nova/+bug/1215593
- data["supported_instances"] = \
- self.driver._get_instance_capabilities()
-
- data["vcpus"] = self.driver._get_vcpu_total()
- data["memory_mb"] = self.driver._get_memory_mb_total()
- data["local_gb"] = disk_info_dict['total']
- data["vcpus_used"] = self.driver._get_vcpu_used()
- data["memory_mb_used"] = self.driver._get_memory_mb_used()
- data["local_gb_used"] = disk_info_dict['used']
- data["hypervisor_type"] = self.driver._get_hypervisor_type()
- data["hypervisor_version"] = self.driver._get_hypervisor_version()
- data["hypervisor_hostname"] = self.driver._get_hypervisor_hostname()
- data["cpu_info"] = self.driver._get_cpu_info()
- data['disk_available_least'] = _get_disk_available_least()
-
- data['pci_passthrough_devices'] = \
- self.driver._get_pci_passthrough_devices()
-
- numa_topology = self.driver._get_host_numa_topology()
- if numa_topology:
- data['numa_topology'] = numa_topology.to_json()
- else:
- data['numa_topology'] = None
-
- self._stats = data
-
- return data
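Note: the disk_available_least arithmetic is carried over unchanged from the removed HostState helper. A worked check with hypothetical numbers:

    Gi = 1024 ** 3                      # mirrors units.Gi
    disk_free_gb = 100                  # free space on the instances path, in GiB
    disk_over_committed = 30 * Gi       # virtual disk size not yet backed by real blocks, in bytes
    available_least = disk_free_gb * Gi - disk_over_committed
    assert available_least / Gi == 70   # GiB exposed as disk_available_least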
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 4d3f0da0c9..fc7d11ff78 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -276,7 +276,7 @@ class Image(object):
except (TypeError, ValueError) as e:
msg = (_("Could not load line %(line)s, got error "
"%(error)s") %
- {'line': line, 'error': unicode(e)})
+ {'line': line, 'error': e})
raise exception.InvalidDiskInfo(reason=msg)
@utils.synchronized(self.disk_info_path, external=False,
@@ -313,7 +313,7 @@ class Image(object):
fileutils.ensure_tree(os.path.dirname(self.disk_info_path))
write_to_disk_info_file()
except OSError as e:
- raise exception.DiskInfoReadWriteFail(reason=unicode(e))
+ raise exception.DiskInfoReadWriteFail(reason=six.text_type(e))
return driver_format
@staticmethod
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index 022e525284..d9e04435ad 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -18,8 +18,4 @@
# NOTE(sdague) for nicer compute_driver specification
from nova.virt.vmwareapi import driver
-# VMwareESXDriver is deprecated in Juno. This property definition
-# allows those configurations to work which reference it while
-# logging a deprecation warning
-VMwareESXDriver = driver.VMwareESXDriver
VMwareVCDriver = driver.VMwareVCDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 43e4ea2120..00181ae548 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -24,6 +24,7 @@ import re
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.vmware import api
+from oslo.vmware import exceptions as vexc
from oslo.vmware import pbm
from oslo.vmware import vim
from oslo.vmware import vim_util
@@ -104,22 +105,6 @@ CONF.register_opts(spbm_opts, 'vmware')
TIME_BETWEEN_API_CALL_RETRIES = 1.0
-# The following class was removed in the transition from Icehouse to
-# Juno, but may still be referenced in configuration files. The
-# following stub allow those configurations to work while logging a
-# deprecation warning.
-class VMwareESXDriver(driver.ComputeDriver):
- """The ESX host connection object."""
-
- def _do_deprecation_warning(self):
- LOG.warn(_LW('The VMware ESX driver is now deprecated and has been '
- 'removed in the Juno release. The VC driver will remain '
- 'and continue to be supported.'))
-
- def __init__(self, virtapi, read_only=False, scheme="https"):
- self._do_deprecation_warning()
-
-
class VMwareVCDriver(driver.ComputeDriver):
"""The VC host connection object."""
@@ -208,7 +193,7 @@ class VMwareVCDriver(driver.ComputeDriver):
def _validate_configuration(self):
if CONF.vmware.use_linked_clone is None:
- raise error_util.UseLinkedCloneConfigurationFault()
+ raise vexc.UseLinkedCloneConfigurationFault()
if CONF.vmware.pbm_enabled:
if not CONF.vmware.pbm_default_policy:
diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py
index cd62cf084e..6f2a9f7de6 100644
--- a/nova/virt/vmwareapi/error_util.py
+++ b/nova/virt/vmwareapi/error_util.py
@@ -26,16 +26,6 @@ from nova.i18n import _
# - map back to NovaException?
-class VMwareDriverConfigurationException(vexc.VMwareDriverException):
- """Base class for all configuration exceptions.
- """
- msg_fmt = _("VMware Driver configuration fault.")
-
-
-class UseLinkedCloneConfigurationFault(VMwareDriverConfigurationException):
- msg_fmt = _("No default value for use_linked_clone found.")
-
-
class NoRootDiskDefined(vexc.VMwareDriverException):
msg_fmt = _("No root disk defined.")
@@ -44,9 +34,9 @@ class TaskInProgress(vexc.VMwareDriverException):
msg_fmt = _("Virtual machine is busy.")
-class PbmDefaultPolicyUnspecified(VMwareDriverConfigurationException):
+class PbmDefaultPolicyUnspecified(vexc.VMwareDriverConfigurationException):
msg_fmt = _("Default PBM policy is required if PBM is enabled.")
-class PbmDefaultPolicyDoesNotExist(VMwareDriverConfigurationException):
+class PbmDefaultPolicyDoesNotExist(vexc.VMwareDriverConfigurationException):
msg_fmt = _("The default PBM policy doesn't exist on the backend.")
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/images.py
index d4ae7993fe..64827f00e4 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/images.py
@@ -102,7 +102,7 @@ class VMwareImage(object):
:param image_id - image id of image
:param image_meta - image metadata we are working with
:return: vmware image object
- :rtype: nova.virt.vmwareapi.vmware_images.VmwareImage
+    :rtype: nova.virt.vmwareapi.images.VMwareImage
"""
if image_meta is None:
image_meta = {}
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 207fadea9e..6f313647d2 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -255,16 +255,14 @@ def _create_vif_spec(client_factory, vif_info):
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
- backing_name = ''.join(['ns0:VirtualEthernetCard',
- 'OpaqueNetworkBackingInfo'])
- backing = client_factory.create(backing_name)
+ backing = client_factory.create(
+ 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo')
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
- backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
- 'VirtualPortBackingInfo'])
- backing = client_factory.create(backing_name)
+ backing = client_factory.create(
+ 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo')
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 49a7a3c477..f76698ecd6 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -51,10 +51,10 @@ from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
-from nova.virt.vmwareapi import vmware_images
CONF = cfg.CONF
@@ -283,7 +283,7 @@ class VMwareVMOps(object):
'datastore_name': vi.datastore.name},
instance=vi.instance)
- vmware_images.fetch_image(
+ images.fetch_image(
context,
vi.instance,
session._host,
@@ -429,8 +429,8 @@ class VMwareVMOps(object):
instance_name=None, power_on=True):
client_factory = self._session.vim.client.factory
- image_info = vmware_images.VMwareImage.from_image(instance.image_ref,
- image_meta)
+ image_info = images.VMwareImage.from_image(instance.image_ref,
+ image_meta)
vi = self._get_vm_config_info(instance, image_info, instance_name)
# Creates the virtual machine. The virtual machine reference returned
@@ -520,7 +520,7 @@ class VMwareVMOps(object):
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
- vmware_images.upload_iso_to_datastore(
+ images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host,
data_center_name=dc_name,
@@ -680,7 +680,7 @@ class VMwareVMOps(object):
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug("Uploading image %s", image_id,
instance=instance)
- vmware_images.upload_image(
+ images.upload_image(
context,
image_id,
instance,
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index a61debbf34..264c452d33 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -32,7 +32,7 @@ from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
-from nova.pci import pci_whitelist
+from nova.pci import whitelist as pci_whitelist
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b9bc052c4a..533cc32c4b 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -46,7 +46,7 @@ from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova.openstack.common import log as logging
-from nova.pci import pci_manager
+from nova.pci import manager as pci_manager
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 084b1855cf..e3d1169019 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -395,7 +395,7 @@ class API(object):
except cinder_exception.OverLimit:
raise exception.OverQuota(overs='volumes')
except cinder_exception.BadRequest as e:
- raise exception.InvalidInput(reason=unicode(e))
+ raise exception.InvalidInput(reason=e)
@translate_volume_exception
def delete(self, context, volume_id):
diff --git a/requirements.txt b/requirements.txt
index e021921763..d707941acd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -46,4 +46,3 @@ lockfile>=0.8
simplejson>=2.2.0
rfc3986>=0.2.0 # Apache-2.0
oslo.vmware>=0.6.0 # Apache-2.0
-oslo.middleware>=0.1.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 14c85e00eb..cb30e55abc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -63,6 +63,7 @@ nova.api.v3.extensions =
aggregates = nova.api.openstack.compute.plugins.v3.aggregates:Aggregates
attach_interfaces = nova.api.openstack.compute.plugins.v3.attach_interfaces:AttachInterfaces
availability_zone = nova.api.openstack.compute.plugins.v3.availability_zone:AvailabilityZone
+ baremetal_nodes = nova.api.openstack.compute.plugins.v3.baremetal_nodes:BareMetalNodes
block_device_mapping = nova.api.openstack.compute.plugins.v3.block_device_mapping:BlockDeviceMapping
cells = nova.api.openstack.compute.plugins.v3.cells:Cells
certificates = nova.api.openstack.compute.plugins.v3.certificates:Certificates