-rw-r--r--  .zuul.yaml | 34
-rw-r--r--  api-ref/source/parameters.yaml | 3
-rw-r--r--  api-ref/source/servers-actions.inc | 5
-rw-r--r--  api-ref/source/servers.inc | 5
-rw-r--r--  doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json | 5
-rw-r--r--  doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json | 6
-rw-r--r--  doc/api_samples/servers/v2.94/server-action-rebuild-resp.json | 80
-rw-r--r--  doc/api_samples/servers/v2.94/server-action-rebuild.json | 15
-rw-r--r--  doc/api_samples/servers/v2.94/server-create-req.json | 30
-rw-r--r--  doc/api_samples/servers/v2.94/server-create-resp.json | 22
-rw-r--r--  doc/api_samples/servers/v2.94/server-get-resp.json | 81
-rw-r--r--  doc/api_samples/servers/v2.94/server-update-req.json | 8
-rw-r--r--  doc/api_samples/servers/v2.94/server-update-resp.json | 78
-rw-r--r--  doc/api_samples/servers/v2.94/servers-details-resp.json | 88
-rw-r--r--  doc/api_samples/servers/v2.94/servers-list-resp.json | 24
-rw-r--r--  doc/api_samples/versions/v21-version-get-resp.json | 2
-rw-r--r--  doc/api_samples/versions/versions-get-resp.json | 2
-rw-r--r--  doc/source/admin/availability-zones.rst | 56
-rw-r--r--  doc/source/admin/compute-node-identification.rst | 83
-rw-r--r--  doc/source/admin/huge-pages.rst | 2
-rw-r--r--  doc/source/admin/index.rst | 1
-rw-r--r--  doc/source/admin/pci-passthrough.rst | 22
-rw-r--r--  doc/source/cli/nova-compute.rst | 2
-rw-r--r--  mypy-files.txt | 4
-rw-r--r--  nova/api/openstack/api_version_request.py | 4
-rw-r--r--  nova/api/openstack/compute/evacuate.py | 25
-rw-r--r--  nova/api/openstack/compute/flavor_access.py | 9
-rw-r--r--  nova/api/openstack/compute/remote_consoles.py | 3
-rw-r--r--  nova/api/openstack/compute/rest_api_version_history.rst | 16
-rw-r--r--  nova/api/openstack/compute/schemas/evacuate.py | 4
-rw-r--r--  nova/api/openstack/compute/schemas/servers.py | 14
-rw-r--r--  nova/api/openstack/compute/servers.py | 9
-rw-r--r--  nova/api/openstack/identity.py | 22
-rw-r--r--  nova/compute/api.py | 14
-rw-r--r--  nova/compute/manager.py | 217
-rw-r--r--  nova/compute/pci_placement_translator.py | 2
-rw-r--r--  nova/compute/resource_tracker.py | 115
-rw-r--r--  nova/compute/rpcapi.py | 18
-rw-r--r--  nova/compute/utils.py | 2
-rw-r--r--  nova/compute/vm_states.py | 3
-rw-r--r--  nova/conductor/api.py | 6
-rw-r--r--  nova/conductor/manager.py | 14
-rw-r--r--  nova/conductor/rpcapi.py | 15
-rw-r--r--  nova/conf/api.py | 7
-rw-r--r--  nova/conf/compute.py | 9
-rw-r--r--  nova/conf/libvirt.py | 10
-rw-r--r--  nova/conf/pci.py | 26
-rw-r--r--  nova/conf/scheduler.py | 21
-rw-r--r--  nova/conf/workarounds.py | 29
-rw-r--r--  nova/exception.py | 25
-rw-r--r--  nova/filesystem.py | 59
-rw-r--r--  nova/manager.py | 7
-rw-r--r--  nova/objects/compute_node.py | 15
-rw-r--r--  nova/objects/request_spec.py | 12
-rw-r--r--  nova/objects/service.py | 12
-rw-r--r--  nova/pci/stats.py | 159
-rw-r--r--  nova/policies/tenant_networks.py | 4
-rw-r--r--  nova/policy.py | 12
-rw-r--r--  nova/rpc.py | 16
-rw-r--r--  nova/scheduler/filters/__init__.py | 41
-rw-r--r--  nova/scheduler/filters/numa_topology_filter.py | 40
-rw-r--r--  nova/scheduler/filters/pci_passthrough_filter.py | 33
-rw-r--r--  nova/service.py | 4
-rw-r--r--  nova/test.py | 16
-rw-r--r--  nova/tests/fixtures/libvirt.py | 6
-rw-r--r--  nova/tests/fixtures/libvirt_imagebackend.py | 3
-rw-r--r--  nova/tests/fixtures/nova.py | 105
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl | 5
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl | 5
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl | 80
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl | 15
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl | 21
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl | 22
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl | 81
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl | 8
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl | 78
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl | 88
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl | 24
-rw-r--r--  nova/tests/functional/api_sample_tests/test_evacuate.py | 52
-rw-r--r--  nova/tests/functional/api_sample_tests/test_remote_consoles.py | 20
-rw-r--r--  nova/tests/functional/api_sample_tests/test_servers.py | 45
-rw-r--r--  nova/tests/functional/compute/test_resource_tracker.py | 1
-rw-r--r--  nova/tests/functional/integrated_helpers.py | 2
-rw-r--r--  nova/tests/functional/libvirt/base.py | 8
-rw-r--r--  nova/tests/functional/libvirt/test_evacuate.py | 4
-rw-r--r--  nova/tests/functional/libvirt/test_pci_in_placement.py | 278
-rw-r--r--  nova/tests/functional/libvirt/test_pci_sriov_servers.py | 346
-rw-r--r--  nova/tests/functional/libvirt/test_vpmem.py | 6
-rw-r--r--  nova/tests/functional/notification_sample_tests/test_compute_task.py | 7
-rw-r--r--  nova/tests/functional/notification_sample_tests/test_instance.py | 20
-rw-r--r--  nova/tests/functional/regressions/test_bug_1669054.py | 3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1713783.py | 6
-rw-r--r--  nova/tests/functional/regressions/test_bug_1764883.py | 3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1823370.py | 3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1896463.py | 2
-rw-r--r--  nova/tests/functional/regressions/test_bug_1922053.py | 6
-rw-r--r--  nova/tests/functional/test_server_group.py | 57
-rw-r--r--  nova/tests/functional/test_server_rescue.py | 86
-rw-r--r--  nova/tests/functional/test_servers.py | 12
-rw-r--r--  nova/tests/functional/test_servers_resource_request.py | 4
-rw-r--r--  nova/tests/functional/test_service.py | 85
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_evacuate.py | 29
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavor_access.py | 25
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_remote_consoles.py | 12
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_groups.py | 97
-rw-r--r--  nova/tests/unit/api/openstack/fakes.py | 14
-rw-r--r--  nova/tests/unit/cmd/test_policy.py | 13
-rw-r--r--  nova/tests/unit/compute/test_api.py | 157
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 32
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 301
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py | 200
-rw-r--r--  nova/tests/unit/compute/test_rpcapi.py | 48
-rw-r--r--  nova/tests/unit/compute/test_utils.py | 2
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 33
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py | 4
-rw-r--r--  nova/tests/unit/objects/test_compute_node.py | 17
-rw-r--r--  nova/tests/unit/objects/test_request_spec.py | 20
-rw-r--r--  nova/tests/unit/pci/test_stats.py | 101
-rw-r--r--  nova/tests/unit/policies/base.py | 10
-rw-r--r--  nova/tests/unit/policies/test_evacuate.py | 2
-rw-r--r--  nova/tests/unit/scheduler/test_host_manager.py | 14
-rw-r--r--  nova/tests/unit/scheduler/test_manager.py | 5
-rw-r--r--  nova/tests/unit/test_filesystem.py | 52
-rw-r--r--  nova/tests/unit/test_policy.py | 10
-rw-r--r--  nova/tests/unit/test_rpc.py | 44
-rw-r--r--  nova/tests/unit/test_service.py | 9
-rw-r--r--  nova/tests/unit/virt/disk/test_api.py | 1
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/__init__.py | 0
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/test_api.py | 63
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/test_core.py | 122
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 167
-rw-r--r--  nova/tests/unit/virt/libvirt/test_utils.py | 43
-rw-r--r--  nova/tests/unit/virt/test_images.py | 46
-rw-r--r--  nova/tests/unit/virt/test_node.py | 142
-rw-r--r--  nova/virt/driver.py | 6
-rw-r--r--  nova/virt/fake.py | 37
-rw-r--r--  nova/virt/images.py | 31
-rw-r--r--  nova/virt/ironic/driver.py | 7
-rw-r--r--  nova/virt/libvirt/cpu/__init__.py | 16
-rw-r--r--  nova/virt/libvirt/cpu/api.py | 62
-rw-r--r--  nova/virt/libvirt/cpu/core.py | 78
-rw-r--r--  nova/virt/libvirt/driver.py | 117
-rw-r--r--  nova/virt/libvirt/host.py | 24
-rw-r--r--  nova/virt/libvirt/imagebackend.py | 1
-rw-r--r--  nova/virt/libvirt/utils.py | 69
-rw-r--r--  nova/virt/node.py | 108
-rw-r--r--  releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml | 13
-rw-r--r--  releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml | 8
-rw-r--r--  releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml | 28
-rw-r--r--  releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml | 23
-rw-r--r--  releasenotes/notes/microversion-2-94-59649401d5763286.yaml | 22
-rw-r--r--  releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml | 6
-rw-r--r--  releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml | 19
-rw-r--r--  releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml | 12
-rw-r--r--  requirements.txt | 4
-rw-r--r--  tox.ini | 2
156 files changed, 5011 insertions, 841 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 1a35975d3a..25d6cc6819 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -665,6 +665,36 @@
parent: tempest-integrated-compute
nodeset: openstack-single-node-focal
+# TODO(gmann): Remove this job once all the required services for the
+# integrated compute gate (Cinder, Glance, Neutron) enable scope and new
+# defaults by default, which means all the nova jobs will be tested with the
+# new RBAC in an integrated way and we will no longer need this separate job.
+- job:
+ name: tempest-integrated-compute-enforce-scope-new-defaults
+ parent: tempest-integrated-compute
+ description: |
+ This job runs the Tempest tests with scope and new defaults enabled
+ for Nova, Neutron, Glance, and Cinder services.
+ # TODO(gmann): There were a few fixes in neutron and neutron-lib for the
+ # RBAC but they are not yet released, so we need to add both projects as
+ # required-projects. These can be removed once new versions of neutron
+ # and neutron-lib are released.
+ required-projects:
+ - openstack/neutron
+ - openstack/neutron-lib
+ vars:
+ devstack_localrc:
+ # Enabling scope and new defaults for the services that have implemented
+ # them.
+ # NOTE(gmann): We need to keep the keystone scope check disabled as
+ # services (except ironic) do not support the system scope and they need
+ # keystone to continue working with project scope. Until keystone policies
+ # are changed to work for project scope as well, we need to keep the scope
+ # check disabled for keystone.
+ NOVA_ENFORCE_SCOPE: true
+ CINDER_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
- project:
# Please try to keep the list of job names sorted alphabetically.
templates:
@@ -723,6 +753,8 @@
- ^tox.ini$
- tempest-integrated-compute-ubuntu-focal:
irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
- grenade-skip-level:
irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
@@ -758,6 +790,8 @@
irrelevant-files: *policies-irrelevant-files
- tempest-integrated-compute-ubuntu-focal:
irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index aba8185a4b..e185dce29d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -6382,6 +6382,9 @@ server_hostname_req:
description: |
The hostname to configure for the instance in the metadata service.
+ Starting with microversion 2.94, this can be a Fully Qualified Domain Name
+ (FQDN) of up to 255 characters in length.
+
.. note::
This information is published via the metadata service and requires
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index 3b8b68d4ff..bb9953afa0 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -604,6 +604,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json
:language: javascript
+**Example Rebuild Server (rebuild Action) (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-action-rebuild.json
+ :language: javascript
+
Response
--------
diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc
index fa199e211c..e72d0641b9 100644
--- a/api-ref/source/servers.inc
+++ b/api-ref/source/servers.inc
@@ -448,6 +448,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
:language: javascript
+**Example Create Server With FQDN in Hostname (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-create-req.json
+ :language: javascript
+
Response
--------
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..ae9fb0a67b
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "targetState": "stopped"
+ }
+}
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
new file mode 100644
index 0000000000..a9f809c830
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "host": "testHost",
+ "targetState": "stopped"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
new file mode 100644
index 0000000000..7eeb568ea4
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild.json b/doc/api_samples/servers/v2.94/server-action-rebuild.json
new file mode 100644
index 0000000000..b5401ad9ca
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild.json
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-req.json b/doc/api_samples/servers/v2.94/server-create-req.json
new file mode 100644
index 0000000000..c6d4ce5640
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-req.json
@@ -0,0 +1,30 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg=="
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-resp.json b/doc/api_samples/servers/v2.94/server-create-resp.json
new file mode 100644
index 0000000000..f50e29dd8b
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-get-resp.json b/doc/api_samples/servers/v2.94/server-get-resp.json
new file mode 100644
index 0000000000..0a05b2f917
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-get-resp.json
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-req.json b/doc/api_samples/servers/v2.94/server-update-req.json
new file mode 100644
index 0000000000..1743f05fc7
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname" : "new-server-hostname.example.com"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-resp.json b/doc/api_samples/servers/v2.94/server-update-resp.json
new file mode 100644
index 0000000000..4aa834f9ec
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-resp.json
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/servers-details-resp.json b/doc/api_samples/servers/v2.94/servers-details-resp.json
new file mode 100644
index 0000000000..54b63fa523
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-details-resp.json
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.94/servers-list-resp.json b/doc/api_samples/servers/v2.94/servers-list-resp.json
new file mode 100644
index 0000000000..742d54b170
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 78678556bf..3f285e6017 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.93",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 59b67279b7..749fd4674f 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.93",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index ffe1be06f9..aff8a0ab31 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -118,11 +118,47 @@ Implications for moving servers
There are several ways to move a server to another host: evacuate, resize,
cold migrate, live migrate, and unshelve. Move operations typically go through
-the scheduler to pick the target host *unless* a target host is specified and
-the request forces the server to that host by bypassing the scheduler. Only
-evacuate and live migrate can forcefully bypass the scheduler and move a
-server to a specified host and even then it is highly recommended to *not*
-force and bypass the scheduler.
+the scheduler to pick the target host.
+
+Prior to API microversion 2.68, when using an older openstackclient (pre-5.5.0)
+or novaclient, it was possible to specify a target host and force the server to
+that host by bypassing the scheduler. Only evacuate and live migrate can
+forcefully bypass the scheduler and move a server to a specified host, and even
+then it is highly recommended *not* to force and bypass the scheduler.
+
+- live migrate with force host (works with older openstackclients, pre-5.5.0):
+
+.. code-block:: console
+
+ $ openstack server migrate --live <host> <server>
+
+- live migrate without forcing:
+
+.. code-block:: console
+
+ $ openstack server migrate --live-migration --host <host> <server>
+
+Support for the 'server evacuate' command was added to openstackclient in
+5.5.3, and it never exposed the ability to force an evacuation, but this was
+previously possible with novaclient.
+
+- evacuate with force host:
+
+.. code-block:: console
+
+ $ nova evacuate --force <server> <host>
+
+- evacuate without forcing using novaclient:
+
+.. code-block:: console
+
+ $ nova evacuate <server>
+
+- evacuate without forcing using openstackclient:
+
+.. code-block:: console
+
+ $ openstack server evacuate --host <host> <server>
With respect to availability zones, a server is restricted to a zone if:
@@ -150,16 +186,6 @@ If the server was not created in a specific zone then it is free to be moved
to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
is a no-op.
-Knowing this, it is dangerous to force a server to another host with evacuate
-or live migrate if the server is restricted to a zone and is then forced to
-move to a host in another zone, because that will create an inconsistency in
-the internal tracking of where that server should live and may require manually
-updating the database for that server. For example, if a user creates a server
-in zone A and then the admin force live migrates the server to zone B, and then
-the user resizes the server, the scheduler will try to move it back to zone A
-which may or may not work, e.g. if the admin deleted or renamed zone A in the
-interim.
-
Resource affinity
~~~~~~~~~~~~~~~~~
diff --git a/doc/source/admin/compute-node-identification.rst b/doc/source/admin/compute-node-identification.rst
new file mode 100644
index 0000000000..31d4802d0b
--- /dev/null
+++ b/doc/source/admin/compute-node-identification.rst
@@ -0,0 +1,83 @@
+===========================
+Compute Node Identification
+===========================
+
+Nova requires that compute nodes maintain a constant and consistent identity
+during their lifecycle. With the exception of the ironic driver, starting in
+the 2023.1 release, this is achieved by use of a file containing the node
+unique identifier that is persisted on disk. Prior to 2023.1, a combination of
+the compute node's hostname and the :oslo.config:option:`host` value in the
+configuration file was used.
+
+The 2023.1 and later compute node identification file must remain unchanged
+during the lifecycle of the compute node. Changing the value or removing the
+file will result in a failure to start and may require advanced techniques
+for recovery. The file is read once at ``nova-compute`` startup, at which point
+it is validated for formatting and the corresponding node is located or
+created in the database.
+
+.. note::
+
+ Even after 2023.1, the compute node's hostname may not be changed after
+ the initial registration with the controller nodes; it is just no longer
+ used as the primary method of identification.
+
+The behavior of ``nova-compute`` is different when using the ironic driver,
+as the (UUID-based) identity and mapping of compute nodes to compute manager
+service hosts is dynamic. In that case, no single node identity is maintained
+by the compute host and thus no identity file is read or written. As a result,
+none of the sections below apply to hosts with
+:oslo.config:option:`compute_driver` set to ``ironic``.
+
+Self-provisioning of the node identity
+--------------------------------------
+
+By default, ``nova-compute`` will automatically generate and write a UUID to
+disk the first time it starts up, and will use that going forward as its
+stable identity. Using the :oslo.config:option:`state_path`
+(which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be
+created with a generated UUID.
+
+Since this file (and its parent directory) is writable by nova, it may be
+desirable to move this to one of the other locations that nova looks for the
+identification file.
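+
+For example, assuming the default state path and that ``nova-compute`` has
+already generated an identity, the file could be relocated with something like
+the following (an illustrative sketch only; keep the file readable by the
+``nova-compute`` service):
+
+.. code-block:: shell
+
+ # mv /var/lib/nova/compute_id /etc/nova/compute_id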
+
+Deployment provisioning of the node identity
+--------------------------------------------
+
+In addition to the location mentioned above, nova will also search the parent
+directories of any config file in use (either the defaults or provided on
+the command line) for a ``compute_id`` file. Thus, a deployment tool may, on
+most systems, pre-provision the node's UUID by writing one to
+``/etc/nova/compute_id``.
+
+The contents of the file should be a single UUID in canonical textual
+representation with no additional whitespace or other characters. The following
+should work on most Linux systems:
+
+.. code-block:: shell
+
+ $ uuidgen > /etc/nova/compute_id
+
+.. note::
+
+ **Do not** execute the above command blindly in every run of a deployment
+ tool, as that will result in overwriting the ``compute_id`` file each time,
+ which *will* prevent nova from working properly.
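+
+A deployment tool could instead make the provisioning step idempotent, for
+example with a guard such as the following (a sketch only; adapt it to your
+tooling):
+
+.. code-block:: shell
+
+ $ [ -f /etc/nova/compute_id ] || uuidgen > /etc/nova/compute_id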
+
+Upgrading from pre-2023.1
+-------------------------
+
+Before release 2023.1, ``nova-compute`` only used the hostname (combined with
+:oslo.config:option:`host`, if set) to identify its compute node objects in
+the database. When upgrading from a prior release, the compute node will
+perform a one-time migration of the hostname-matched compute node UUID to the
+``compute_id`` file in the :oslo.config:option:`state_path` location.
+
+.. note::
+
+ It is imperative that you allow the above migration to run and complete on
+ compute nodes that are being upgraded. Skipping this step by
+ pre-provisioning a ``compute_id`` file before the upgrade will **not** work
+ and will be equivalent to changing the compute node UUID after it has
+ already been created once.
diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst
index 73f6c5dd2d..a451c6e3ab 100644
--- a/doc/source/admin/huge-pages.rst
+++ b/doc/source/admin/huge-pages.rst
@@ -96,7 +96,7 @@ pages at boot time, run:
.. code-block:: console
- # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' > /etc/default/grub
+ # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub
$ grep GRUB_CMDLINE_LINUX /etc/default/grub
GRUB_CMDLINE_LINUX="..."
GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 93b4e6a554..8cb5bf7156 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -206,6 +206,7 @@ instance for these kind of workloads.
secure-boot
sev
managing-resource-providers
+ compute-node-identification
resource-limits
cpu-models
libvirt-misc
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index 46d1604214..09a963603d 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -65,6 +65,10 @@ capabilities.
:oslo.config:option:`pci.device_spec` configuration that uses the
``devname`` field.
+.. versionchanged:: 27.0.0 (2023.1 Antelope):
+ Nova provides Placement-based scheduling support for servers with
+ flavor-based PCI requests. This support is disabled by default.
+
Enabling PCI passthrough
------------------------
@@ -442,6 +446,24 @@ removed and VFs from the same PF is configured (or vice versa) then
nova-compute will refuse to start as it would create a situation where both
the PF and its VFs are made available for consumption.
+Since nova 27.0.0 (2023.1 Antelope), scheduling and allocation of PCI devices
+in Placement can also be enabled via
+:oslo.config:option:`filter_scheduler.pci_in_placement`. Please note that this
+should only be enabled after all the computes in the system are configured to
+report PCI inventory in Placement by enabling
+:oslo.config:option:`pci.report_in_placement`. In Antelope, flavor-based PCI
+requests are supported, but Neutron port-based PCI requests are not handled in
+Placement.
+
+If you are upgrading from an earlier version with existing servers that have
+PCI allocations, then you must first enable
+:oslo.config:option:`pci.report_in_placement` on all your computes having PCI
+allocations and restart the nova-compute service, before you enable
+:oslo.config:option:`filter_scheduler.pci_in_placement`. The compute service
+will heal the missing PCI allocations in Placement during startup and will
+continue healing missing allocations for future servers until the scheduling
+support is enabled.
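+
+Putting the two options together, a minimal sketch of the relevant
+``nova.conf`` sections (option names as referenced above; all other options are
+left at their defaults) could look like this:
+
+.. code-block:: ini
+
+ [pci]
+ report_in_placement = True
+
+ [filter_scheduler]
+ pci_in_placement = True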
+
If a flavor requests multiple ``type-VF`` devices via
:nova:extra-spec:`pci_passthrough:alias` then it is important to consider the
value of :nova:extra-spec:`group_policy` as well. The value ``none``
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f190949efa..1346dab92e 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -41,6 +41,8 @@ Files
* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
+* ``/etc/nova/compute_id``
+* ``/var/lib/nova/compute_id``
See Also
========
diff --git a/mypy-files.txt b/mypy-files.txt
index 5a3b9ab339..391ed58d87 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -1,6 +1,7 @@
nova/compute/manager.py
nova/compute/pci_placement_translator.py
nova/crypto.py
+nova/filesystem.py
nova/limit/local.py
nova/limit/placement.py
nova/network/neutron.py
@@ -13,6 +14,9 @@ nova/virt/driver.py
nova/virt/hardware.py
nova/virt/libvirt/machine_type_utils.py
nova/virt/libvirt/__init__.py
+nova/virt/libvirt/cpu/__init__.py
+nova/virt/libvirt/cpu/api.py
+nova/virt/libvirt/cpu/core.py
nova/virt/libvirt/driver.py
nova/virt/libvirt/event.py
nova/virt/libvirt/guest.py
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index 02afdbd850..718ac7e8e6 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -253,6 +253,8 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /os-keypairs`` and allow including @ and dot (.) characters
in keypair name.
* 2.93 - Add support for volume backed server rebuild.
+ * 2.94 - Allow FQDN in server hostname.
+ * 2.95 - Evacuate will now stop instance at destination.
"""
# The minimum and maximum versions of the API supported
@@ -261,7 +263,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.93'
+_MAX_API_VERSION = '2.95'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/compute/evacuate.py b/nova/api/openstack/compute/evacuate.py
index aa35812759..a6602be079 100644
--- a/nova/api/openstack/compute/evacuate.py
+++ b/nova/api/openstack/compute/evacuate.py
@@ -23,9 +23,11 @@ from nova.api.openstack.compute.schemas import evacuate
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
+from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
+from nova import objects
from nova.policies import evacuate as evac_policies
from nova import utils
@@ -33,6 +35,8 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED = 62
+
class EvacuateController(wsgi.Controller):
def __init__(self):
@@ -77,7 +81,8 @@ class EvacuateController(wsgi.Controller):
@validation.schema(evacuate.evacuate, "2.0", "2.13")
@validation.schema(evacuate.evacuate_v214, "2.14", "2.28")
@validation.schema(evacuate.evacuate_v2_29, "2.29", "2.67")
- @validation.schema(evacuate.evacuate_v2_68, "2.68")
+ @validation.schema(evacuate.evacuate_v2_68, "2.68", "2.94")
+ @validation.schema(evacuate.evacuate_v2_95, "2.95")
def _evacuate(self, req, id, body):
"""Permit admins to evacuate a server from a failed host
to a new one.
@@ -92,6 +97,19 @@ class EvacuateController(wsgi.Controller):
host = evacuate_body.get("host")
force = None
+ target_state = None
+ if api_version_request.is_supported(req, min_version='2.95'):
+ min_ver = objects.service.get_minimum_version_all_cells(
+ context, ['nova-compute'])
+ if min_ver < MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED:
+ raise exception.NotSupportedComputeForEvacuateV295(
+ {'currently': min_ver,
+ 'expected': MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED})
+ # Starting with 2.95 any evacuated instance will be stopped at the
+ # destination. Previously an active or stopped instance would have
+ # kept its state.
+ target_state = vm_states.STOPPED
+
on_shared_storage = self._get_on_shared_storage(req, evacuate_body)
if api_version_request.is_supported(req, min_version='2.29'):
@@ -120,7 +138,8 @@ class EvacuateController(wsgi.Controller):
try:
self.compute_api.evacuate(context, instance, host,
- on_shared_storage, password, force)
+ on_shared_storage, password, force,
+ target_state)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate', id)
@@ -130,6 +149,8 @@ class EvacuateController(wsgi.Controller):
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
+ except exception.UnsupportedRPCVersion as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
if (not api_version_request.is_supported(req, min_version='2.14') and
CONF.api.enable_instance_password):
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index e17e6f0ddc..fc8df15db5 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -93,7 +93,14 @@ class FlavorActionController(wsgi.Controller):
vals = body['removeTenantAccess']
tenant = vals['tenant']
- identity.verify_project_id(context, tenant)
+ # It doesn't really matter if the project exists or not: we can delete
+ # it from the flavor's access list in both cases.
+ try:
+ identity.verify_project_id(context, tenant)
+ except webob.exc.HTTPBadRequest as identity_exc:
+ msg = "Project ID %s is not a valid project." % tenant
+ if msg not in identity_exc.explanation:
+ raise
# NOTE(gibi): We have to load a flavor from the db here as
# flavor.remove_access() will try to emit a notification and that needs
diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py
index 36015542aa..7d374ef432 100644
--- a/nova/api/openstack/compute/remote_consoles.py
+++ b/nova/api/openstack/compute/remote_consoles.py
@@ -56,6 +56,9 @@ class RemoteConsolesController(wsgi.Controller):
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as e:
+ common.raise_http_conflict_for_instance_invalid_state(
+ e, 'get_vnc_console', id)
except NotImplementedError:
common.raise_feature_not_supported()
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index 0040310198..b34510be5c 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1229,3 +1229,19 @@ Add support for volume backed server rebuild. The end user will provide the
image with the rebuild command and it will rebuild the volume with the new
image similar to the result of rebuilding an ephemeral disk.
+
+2.94
+---------------------
+
+The ``hostname`` parameter to the ``POST /servers`` (create server), ``PUT
+/servers/{id}`` (update server) and ``POST /servers/{server_id}/action
+(rebuild)`` (rebuild server) APIs is now allowed to be a Fully Qualified Domain
+Name (FQDN).
+
+
+2.95
+---------------------
+
+Any evacuated instance will now be stopped at the destination. This requires a
+minimum compute service version of 27.0.0 (2023.1 Antelope). Operators can
+still use a previous microversion for the older behavior.
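+
+For example, to retain the pre-2.95 behavior an operator could pin the older
+microversion on the client side (an illustrative sketch; the ``server
+evacuate`` command requires openstackclient >= 5.5.3):
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version 2.94 server evacuate --host <host> <server>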
diff --git a/nova/api/openstack/compute/schemas/evacuate.py b/nova/api/openstack/compute/schemas/evacuate.py
index a415a97f89..c7b84a655e 100644
--- a/nova/api/openstack/compute/schemas/evacuate.py
+++ b/nova/api/openstack/compute/schemas/evacuate.py
@@ -46,3 +46,7 @@ evacuate_v2_29['properties']['evacuate']['properties'][
# v2.68 removes the 'force' parameter added in v2.29, meaning it is identical
# to v2.14
evacuate_v2_68 = copy.deepcopy(evacuate_v214)
+
+# v2.95 keeps the same schema; evacuating an instance will now result in it
+# being stopped at the destination.
+evacuate_v2_95 = copy.deepcopy(evacuate_v2_68)
diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py
index 300411de40..0869f83434 100644
--- a/nova/api/openstack/compute/schemas/servers.py
+++ b/nova/api/openstack/compute/schemas/servers.py
@@ -360,6 +360,11 @@ create_v290 = copy.deepcopy(create_v274)
create_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+# Support FQDN as hostname
+create_v294 = copy.deepcopy(create_v290)
+create_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
update = {
'type': 'object',
'properties': {
@@ -391,6 +396,11 @@ update_v290 = copy.deepcopy(update_v219)
update_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+
+update_v294 = copy.deepcopy(update_v290)
+update_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
rebuild = {
'type': 'object',
'properties': {
@@ -449,6 +459,10 @@ rebuild_v290 = copy.deepcopy(rebuild_v263)
rebuild_v290['properties']['rebuild']['properties'][
'hostname'] = parameter_types.hostname
+rebuild_v294 = copy.deepcopy(rebuild_v290)
+rebuild_v294['properties']['rebuild']['properties'][
+ 'hostname'] = parameter_types.fqdn
+
resize = {
'type': 'object',
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 946d5a7042..33e74456fd 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -677,7 +677,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.create_v263, '2.63', '2.66')
@validation.schema(schema_servers.create_v267, '2.67', '2.73')
@validation.schema(schema_servers.create_v274, '2.74', '2.89')
- @validation.schema(schema_servers.create_v290, '2.90')
+ @validation.schema(schema_servers.create_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.create_v294, '2.94')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
@@ -906,7 +907,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.update_v20, '2.0', '2.0')
@validation.schema(schema_servers.update, '2.1', '2.18')
@validation.schema(schema_servers.update_v219, '2.19', '2.89')
- @validation.schema(schema_servers.update_v290, '2.90')
+ @validation.schema(schema_servers.update_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.update_v294, '2.94')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
@@ -1147,7 +1149,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.rebuild_v254, '2.54', '2.56')
@validation.schema(schema_servers.rebuild_v257, '2.57', '2.62')
@validation.schema(schema_servers.rebuild_v263, '2.63', '2.89')
- @validation.schema(schema_servers.rebuild_v290, '2.90')
+ @validation.schema(schema_servers.rebuild_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.rebuild_v294, '2.94')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
diff --git a/nova/api/openstack/identity.py b/nova/api/openstack/identity.py
index 7ffc623fed..15ec884aea 100644
--- a/nova/api/openstack/identity.py
+++ b/nova/api/openstack/identity.py
@@ -27,24 +27,27 @@ def verify_project_id(context, project_id):
"""verify that a project_id exists.
This attempts to verify that a project id exists. If it does not,
- an HTTPBadRequest is emitted.
+ an HTTPBadRequest is emitted. An HTTPBadRequest is also emitted
+ if the Keystone identity service version 3.0 endpoint is not found.
"""
adap = utils.get_ksa_adapter(
'identity', ksa_auth=context.get_auth_plugin(),
min_version=(3, 0), max_version=(3, 'latest'))
- failure = webob.exc.HTTPBadRequest(
- explanation=_("Project ID %s is not a valid project.") %
- project_id)
try:
resp = adap.get('/projects/%s' % project_id)
except kse.EndpointNotFound:
LOG.error(
- "Keystone identity service version 3.0 was not found. This might "
- "be because your endpoint points to the v2.0 versioned endpoint "
- "which is not supported. Please fix this.")
- raise failure
+ "Keystone identity service version 3.0 was not found. This "
+ "might be caused by Nova misconfiguration or Keystone "
+ "problems.")
+ msg = _("Nova was unable to find Keystone service endpoint.")
+ # TODO(astupnik): It may be reasonable to switch to HTTP 503
+ # (HTTP Service Unavailable) instead of HTTP Bad Request here.
+ # If the proper Keystone service is inaccessible, then technically
+ # this is a server side error and not an error in Nova.
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException:
# something is wrong, like there isn't a keystone v3 endpoint,
# or nova isn't configured for the interface to talk to it;
@@ -57,7 +60,8 @@ def verify_project_id(context, project_id):
return True
elif resp.status_code == 404:
# we got access, and we know this project is not there
- raise failure
+ msg = _("Project ID %s is not a valid project.") % project_id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
elif resp.status_code == 403:
# we don't have enough permission to verify this, so default
# to "it's ok".
diff --git a/nova/compute/api.py b/nova/compute/api.py
index b2884543e5..6b2023c19f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3797,7 +3797,8 @@ class API:
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
request_spec=request_spec,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=None)
def _check_volume_status(self, context, bdms):
"""Check whether the status of the volume is "in-use".
@@ -4699,6 +4700,7 @@ class API:
allow_bfv_rescue=False):
"""Rescue the given instance."""
+ image_meta = None
if rescue_image_ref:
try:
image_meta = image_meta_obj.ImageMeta.from_image_ref(
@@ -4719,6 +4721,8 @@ class API:
"image properties set")
raise exception.UnsupportedRescueImage(
image=rescue_image_ref)
+ else:
+ image_meta = instance.image_meta
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4727,6 +4731,9 @@ class API:
volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
+ allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \
+ 'hw_rescue_device' in image_meta.properties
+
if volume_backed and allow_bfv_rescue:
cn = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
@@ -5611,7 +5618,7 @@ class API:
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR], task_state=None)
def evacuate(self, context, instance, host, on_shared_storage,
- admin_password=None, force=None):
+ admin_password=None, force=None, target_state=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
@@ -5622,6 +5629,7 @@ class API:
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
:param force: Force the evacuation to the specific host target
+ :param target_state: Set a target state for the evacuated instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
@@ -5685,7 +5693,7 @@ class API:
on_shared_storage=on_shared_storage,
host=host,
request_spec=request_spec,
- )
+ target_state=target_state)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 5ebc278b29..952ab3e199 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -84,6 +84,7 @@ from nova.objects import external_event as external_event_obj
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_req_module
from nova.pci import whitelist
from nova import safe_utils
@@ -96,6 +97,7 @@ from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
+import nova.virt.node
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
@@ -616,7 +618,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='6.1')
+ target = messaging.Target(version='6.2')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1470,31 +1472,111 @@ class ComputeManager(manager.Manager):
:return: a dict of ComputeNode objects keyed by the UUID of the given
node.
"""
- nodes_by_uuid = {}
try:
- node_names = self.driver.get_available_nodes()
+ node_ids = self.driver.get_nodenames_by_uuid()
except exception.VirtDriverNotReady:
LOG.warning(
"Virt driver is not ready. If this is the first time this "
- "service is starting on this host, then you can ignore this "
- "warning.")
+ "service is starting on this host, then you can ignore "
+ "this warning.")
return {}
- for node_name in node_names:
- try:
- node = objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, node_name)
- nodes_by_uuid[node.uuid] = node
- except exception.ComputeHostNotFound:
- LOG.warning(
- "Compute node %s not found in the database. If this is "
- "the first time this service is starting on this host, "
- "then you can ignore this warning.", node_name)
- return nodes_by_uuid
+ nodes = objects.ComputeNodeList.get_all_by_uuids(context,
+ list(node_ids.keys()))
+ if not nodes:
+ # NOTE(danms): This should only happen if the compute_id is
+ # pre-provisioned on a host that has never started.
+ LOG.warning('Compute nodes %s for host %s were not found in the '
+ 'database. If this is the first time this service is '
+ 'starting on this host, then you can ignore this '
+ 'warning.',
+ list(node_ids.keys()), self.host)
+ return {}
- def init_host(self):
+ for node in nodes:
+ if node.hypervisor_hostname != node_ids.get(node.uuid):
+ raise exception.InvalidConfiguration(
+ ('My compute node %s has hypervisor_hostname %s '
+ 'but virt driver reports it should be %s. Possible '
+ 'rename detected, refusing to start!') % (
+ node.uuid, node.hypervisor_hostname,
+ node_ids.get(node.uuid)))
+
+ return {n.uuid: n for n in nodes}
+
+ def _ensure_existing_node_identity(self, service_ref):
+ """If we are upgrading from an older service version, we need
+ to write our node identity uuid (if not already done) based on
+ nodes assigned to us in the database.
+ """
+ if 'ironic' in CONF.compute_driver.lower():
+ # We do not persist a single local node identity for
+ # ironic
+ return
+
+ if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
+ # Already new enough, nothing to do here, but make sure that we
+ # have a UUID file already, as this is not our first time starting.
+ if nova.virt.node.read_local_node_uuid() is None:
+ raise exception.InvalidConfiguration(
+ ('No local node identity found, but this is not our '
+ 'first startup on this host. Refusing to start after '
+ 'potentially having lost that state!'))
+ return
+
+ if nova.virt.node.read_local_node_uuid():
+ # We already have a local node identity, no migration needed
+ return
+
+ context = nova.context.get_admin_context()
+ db_nodes = objects.ComputeNodeList.get_all_by_host(context, self.host)
+ if not db_nodes:
+ # This means we have no nodes in the database (that we
+ # know of) and thus have no need to record an existing
+            # UUID. That is unexpected, so refuse to start.
+ raise exception.InvalidConfiguration(
+ ('Upgrading from service version %i but found no '
+ 'nodes in the database for host %s to persist '
+ 'locally; Possible rename detected, '
+ 'refusing to start!') % (
+ service_ref.version, self.host))
+
+ if len(db_nodes) > 1:
+ # If this happens we can't do the right thing, so raise an
+ # exception to abort host startup
+            LOG.warning('Multiple nodes found in the database for host %s; '
+                        'unable to persist local node identity automatically',
+                        self.host)
+ raise exception.InvalidConfiguration(
+ 'Multiple nodes found in database, manual node uuid '
+ 'configuration required')
+
+ nova.virt.node.write_local_node_uuid(db_nodes[0].uuid)
+
+ def _check_for_host_rename(self, nodes_by_uuid):
+ if 'ironic' in CONF.compute_driver.lower():
+ # Ironic (currently) rebalances nodes at various times, and as
+ # such, nodes being discovered as assigned to this host with a
+ # different hostname is not surprising. Skip this check for
+ # ironic.
+ return
+ for node in nodes_by_uuid.values():
+ if node.host != self.host:
+ raise exception.InvalidConfiguration(
+ 'My node %s has host %r but my host is %r; '
+ 'Possible rename detected, refusing to start!' % (
+ node.uuid, node.host, self.host))
+ LOG.debug('Verified node %s matches my host %s',
+ node.uuid, self.host)
+
+ def init_host(self, service_ref):
"""Initialization for a standalone compute service."""
+ if service_ref:
+ # If we are an existing service, check to see if we need
+ # to record a locally-persistent node identity because
+ # we have upgraded from a previous version.
+ self._ensure_existing_node_identity(service_ref)
+
if CONF.pci.device_spec:
# Simply loading the PCI passthrough spec will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
@@ -1524,7 +1606,18 @@ class ComputeManager(manager.Manager):
raise exception.InvalidConfiguration(msg)
self.driver.init_host(host=self.host)
+
+ # NOTE(gibi): At this point the compute_nodes of the resource tracker
+ # has not been populated yet so we cannot rely on the resource tracker
+ # here.
context = nova.context.get_admin_context()
+ nodes_by_uuid = self._get_nodes(context)
+
+ # NOTE(danms): Check for a possible host rename and abort
+ # startup before we start mucking with instances we think are
+ # ours.
+ self._check_for_host_rename(nodes_by_uuid)
+
instances = objects.InstanceList.get_by_host(
context, self.host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
@@ -1534,17 +1627,12 @@ class ComputeManager(manager.Manager):
self._validate_pinning_configuration(instances)
self._validate_vtpm_configuration(instances)
- # NOTE(gibi): At this point the compute_nodes of the resource tracker
- # has not been populated yet so we cannot rely on the resource tracker
- # here.
# NOTE(gibi): If ironic and vcenter virt driver slow start time
# becomes problematic here then we should consider adding a config
# option or a driver flag to tell us if we should thread
# _destroy_evacuated_instances and
# _error_out_instances_whose_build_was_interrupted out in the
# background on startup
- nodes_by_uuid = self._get_nodes(context)
-
try:
# checking that instance was not already evacuated to other host
evacuated_instances = self._destroy_evacuated_instances(
@@ -2471,10 +2559,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils\
- .update_pci_request_with_placement_allocations(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -3622,7 +3712,7 @@ class ComputeManager(manager.Manager):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -3657,6 +3747,7 @@ class ComputeManager(manager.Manager):
:param reimage_boot_volume: Boolean to specify whether the user has
explicitly requested to rebuild a boot
volume
+ :param target_state: Set a target state for the evacuated instance.
"""
# recreate=True means the instance is being evacuated from a failed
@@ -3721,7 +3812,8 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass, orig_sys_metadata,
bdms, evacuate, on_shared_storage, preserve_ephemeral,
migration, request_spec, allocs, rebuild_claim,
- scheduled_node, limits, accel_uuids, reimage_boot_volume)
+ scheduled_node, limits, accel_uuids, reimage_boot_volume,
+ target_state)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
@@ -3781,7 +3873,7 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, rebuild_claim, scheduled_node, limits, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
"""Helper to avoid deep nesting in the top-level method."""
provider_mapping = None
@@ -3789,10 +3881,12 @@ class ComputeManager(manager.Manager):
provider_mapping = self._get_request_group_mapping(request_spec)
if provider_mapping:
- compute_utils.\
- update_pci_request_with_placement_allocations(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
claim_context = rebuild_claim(
context, instance, scheduled_node, allocations,
@@ -3803,7 +3897,8 @@ class ComputeManager(manager.Manager):
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations,
- provider_mapping, accel_uuids, reimage_boot_volume)
+ provider_mapping, accel_uuids, reimage_boot_volume,
+ target_state)
@staticmethod
def _get_image_name(image_meta):
@@ -3817,10 +3912,18 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, request_group_resource_providers_mapping,
- accel_uuids, reimage_boot_volume):
+ accel_uuids, reimage_boot_volume, target_state):
orig_vm_state = instance.vm_state
if evacuate:
+ if target_state and orig_vm_state != vm_states.ERROR:
+                # This will ensure that the instance has the desired state
+                # at the destination.
+ if target_state not in vm_states.ALLOW_TARGET_STATES:
+ raise exception.InstanceEvacuateNotSupportedTargetState(
+ target_state=target_state)
+ orig_vm_state = target_state
+
if request_spec:
# NOTE(gibi): Do a late check of server group policy as
# parallel scheduling could violate such policy. This will
@@ -5415,10 +5518,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils.\
- update_pci_request_with_placement_allocations(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -6899,12 +7004,12 @@ class ComputeManager(manager.Manager):
try:
if provider_mappings:
- update = (
- compute_utils.
- update_pci_request_with_placement_allocations)
- update(
- context, self.reportclient, instance.pci_requests.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mappings,
+ )
accel_info = []
if accel_uuids:
@@ -7984,12 +8089,12 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid) from e
try:
- update = (
- compute_utils.
- update_pci_request_with_placement_allocations)
- update(
- context, self.reportclient, pci_reqs.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ pci_reqs.requests,
+ provider_mappings,
+ )
except (
exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
@@ -10375,6 +10480,14 @@ class ComputeManager(manager.Manager):
LOG.exception(
"Error updating PCI resources for node %(node)s.",
{'node': nodename})
+ except exception.InvalidConfiguration as e:
+ if startup:
+ # If this happens during startup, we need to let it raise to
+ # abort our service startup.
+ raise
+ else:
+ LOG.error("Error updating resources for node %s: %s",
+ nodename, e)
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
@@ -11291,7 +11404,7 @@ class _ComputeV5Proxy(object):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
- accel_uuids, False)
+ accel_uuids, False, None)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
diff --git a/nova/compute/pci_placement_translator.py b/nova/compute/pci_placement_translator.py
index 3ee52e303c..016efd9122 100644
--- a/nova/compute/pci_placement_translator.py
+++ b/nova/compute/pci_placement_translator.py
@@ -614,7 +614,7 @@ def update_provider_tree_for_pci(
if updated:
LOG.debug(
"Placement PCI view needs allocation healing. This should only "
- "happen if [scheduler]pci_in_placement is still disabled. "
+ "happen if [filter_scheduler]pci_in_placement is still disabled. "
"Original allocations: %s New allocations: %s",
old_alloc,
allocations,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index ab60c77d96..3f911f3708 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -49,6 +49,7 @@ from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
+from nova.virt import node
CONF = nova.conf.CONF
@@ -619,18 +620,11 @@ class ResourceTracker(object):
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
- # Remove usage for an instance that is tracked in migrations, such as
- # on the dest node during revert resize.
- if instance['uuid'] in self.tracked_migrations:
- migration = self.tracked_migrations.pop(instance['uuid'])
+ if instance["uuid"] in self.tracked_migrations:
if not flavor:
- flavor = self._get_flavor(instance, prefix, migration)
- # Remove usage for an instance that is not tracked in migrations (such
- # as on the source node after a migration).
- # NOTE(lbeliveau): On resize on the same node, the instance is
- # included in both tracked_migrations and tracked_instances.
- elif instance['uuid'] in self.tracked_instances:
- self.tracked_instances.remove(instance['uuid'])
+ flavor = self._get_flavor(
+ instance, prefix, self.tracked_migrations[instance["uuid"]]
+ )
if flavor is not None:
numa_topology = self._get_migration_context_resource(
@@ -646,6 +640,15 @@ class ResourceTracker(object):
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
+ # Remove usage for an instance that is tracked in migrations, such as
+ # on the dest node during revert resize.
+ self.tracked_migrations.pop(instance['uuid'], None)
+ # Remove usage for an instance that is not tracked in migrations (such
+ # as on the source node after a migration).
+ # NOTE(lbeliveau): On resize on the same node, the instance is
+ # included in both tracked_migrations and tracked_instances.
+ self.tracked_instances.discard(instance['uuid'])
+
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
@@ -666,50 +669,6 @@ class ResourceTracker(object):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
- def _check_for_nodes_rebalance(self, context, resources, nodename):
- """Check if nodes rebalance has happened.
-
- The ironic driver maintains a hash ring mapping bare metal nodes
- to compute nodes. If a compute dies, the hash ring is rebuilt, and
- some of its bare metal nodes (more precisely, those not in ACTIVE
- state) are assigned to other computes.
-
- This method checks for this condition and adjusts the database
- accordingly.
-
- :param context: security context
- :param resources: initial values
- :param nodename: node name
- :returns: True if a suitable compute node record was found, else False
- """
- if not self.driver.rebalances_nodes:
- return False
-
- # Its possible ironic just did a node re-balance, so let's
- # check if there is a compute node that already has the correct
- # hypervisor_hostname. We can re-use that rather than create a
- # new one and have to move existing placement allocations
- cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
- context, nodename)
-
- if len(cn_candidates) == 1:
- cn = cn_candidates[0]
- LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
- {"name": nodename, "old": cn.host, "new": self.host})
- cn.host = self.host
- self.compute_nodes[nodename] = cn
- self._copy_resources(cn, resources)
- self._setup_pci_tracker(context, cn, resources)
- self._update(context, cn)
- return True
- elif len(cn_candidates) > 1:
- LOG.error(
- "Found more than one ComputeNode for nodename %s. "
- "Please clean up the orphaned ComputeNode records in your DB.",
- nodename)
-
- return False
-
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
@@ -727,6 +686,7 @@ class ResourceTracker(object):
False otherwise
"""
nodename = resources['hypervisor_hostname']
+ node_uuid = resources['uuid']
# if there is already a compute node just use resources
# to initialize
@@ -738,23 +698,43 @@ class ResourceTracker(object):
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
- cn = self._get_compute_node(context, nodename)
+
+ # We use read_deleted=True so that we will find and recover a deleted
+ # node object, if necessary.
+ with utils.temporary_mutation(context, read_deleted='yes'):
+ cn = self._get_compute_node(context, node_uuid)
+ if cn and cn.deleted:
+ # Undelete and save this right now so that everything below
+ # can continue without read_deleted=yes
+ LOG.info('Undeleting compute node %s', cn.uuid)
+ cn.deleted = False
+ cn.deleted_at = None
+ cn.save()
if cn:
+ if cn.host != self.host:
+ LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
+ {"name": nodename, "old": cn.host, "new": self.host})
+ cn.host = self.host
+ self._update(context, cn)
+
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
- if self._check_for_nodes_rebalance(context, resources, nodename):
- return False
-
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
- cn.create()
+ try:
+ cn.create()
+ except exception.DuplicateRecord:
+ raise exception.InvalidConfiguration(
+ 'Duplicate compute node record found for host %s node %s' % (
+ cn.host, cn.hypervisor_hostname))
+
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
@@ -887,6 +867,14 @@ class ResourceTracker(object):
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
+ if 'uuid' not in resources:
+ # NOTE(danms): Any driver that does not provide a uuid per
+ # node gets the locally-persistent compute_id. Only ironic
+ # should be setting the per-node uuid (and returning
+ # multiple nodes in general). If this is the first time we
+ # are creating a compute node on this host, we will
+ # generate and persist this uuid for the future.
+ resources['uuid'] = node.get_local_node_uuid()
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
@@ -1012,14 +1000,13 @@ class ResourceTracker(object):
if startup:
self._check_resources(context)
- def _get_compute_node(self, context, nodename):
+ def _get_compute_node(self, context, node_uuid):
"""Returns compute node for the host and nodename."""
try:
- return objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, nodename)
+ return objects.ComputeNode.get_by_uuid(context, node_uuid)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
- {'host': self.host, 'node': nodename})
+ {'host': self.host, 'node': node_uuid})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 3ac4c6dcfa..efc06300db 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -403,6 +403,7 @@ class ComputeAPI(object):
* ... - Rename the instance_type argument of resize_instance() to
flavor
* 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
+ * 6.2 - Add target_state parameter to rebuild_instance()
'''
VERSION_ALIASES = {
@@ -424,6 +425,7 @@ class ComputeAPI(object):
'xena': '6.0',
'yoga': '6.0',
'zed': '6.1',
+ 'antelope': '6.2',
}
@property
@@ -1083,7 +1085,7 @@ class ComputeAPI(object):
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate, on_shared_storage, host, node,
preserve_ephemeral, migration, limits, request_spec, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
@@ -1096,12 +1098,20 @@ class ComputeAPI(object):
'limits': limits,
'request_spec': request_spec,
'accel_uuids': accel_uuids,
- 'reimage_boot_volume': reimage_boot_volume
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
-
- version = '6.1'
+ version = '6.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
+ if msg_args['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance",
+ required="6.2")
+ else:
+ del msg_args['target_state']
+ version = '6.1'
+ if not client.can_send_version(version):
if msg_args['reimage_boot_volume']:
raise exception.NovaException(
'Compute RPC version does not support '
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index fa78292170..30efc24fc7 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -1536,7 +1536,7 @@ def update_pci_request_with_placement_allocations(
for spec in pci_request.spec:
# FIXME(gibi): this is baaad but spec is a dict of strings so
# we need to serialize
- spec['rp_uuids'] = ','.join(set(mapping.values()))
+ spec['rp_uuids'] = ','.join(mapping.values())
elif needs_update_due_to_qos(pci_request, provider_mapping):
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 633894c1ea..1a916ea59a 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -76,3 +76,6 @@ ALLOW_TRIGGER_CRASH_DUMP = [ACTIVE, PAUSED, RESCUED, RESIZED, ERROR]
# states we allow resources to be freed in
ALLOW_RESOURCE_REMOVAL = [DELETED, SHELVED_OFFLOADED]
+
+# states we allow for evacuate instance
+ALLOW_TARGET_STATES = [STOPPED]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 778fdd6c73..843c8ce3a3 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -144,7 +144,8 @@ class ComputeTaskAPI(object):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None,
- request_spec=None, reimage_boot_volume=False):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
@@ -158,7 +159,8 @@ class ComputeTaskAPI(object):
preserve_ephemeral=preserve_ephemeral,
host=host,
request_spec=request_spec,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def cache_images(self, context, aggregate, image_ids):
"""Request images be pre-cached on hosts within an aggregate.
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 3fcea4481d..8177519331 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -235,7 +235,7 @@ class ComputeTaskManager:
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.24')
+ target = messaging.Target(namespace='compute_task', version='1.25')
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -1037,6 +1037,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+            # NOTE(gibi): as PCI devices are tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
@@ -1146,7 +1152,8 @@ class ComputeTaskManager:
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
- request_spec=None, reimage_boot_volume=False):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
@@ -1350,7 +1357,8 @@ class ComputeTaskManager:
limits=limits,
request_spec=request_spec,
accel_uuids=accel_uuids,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index ffaecd2c95..a5f0cf0094 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -287,6 +287,7 @@ class ComputeTaskAPI(object):
1.22 - Added confirm_snapshot_based_resize()
1.23 - Added revert_snapshot_based_resize()
1.24 - Add reimage_boot_volume parameter to rebuild_instance()
+ 1.25 - Add target_state parameter to rebuild_instance()
"""
def __init__(self):
@@ -428,8 +429,8 @@ class ComputeTaskAPI(object):
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, request_spec=None,
- reimage_boot_volume=False):
- version = '1.24'
+ reimage_boot_volume=False, target_state=None):
+ version = '1.25'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
@@ -442,9 +443,17 @@ class ComputeTaskAPI(object):
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
- 'reimage_boot_volume': reimage_boot_volume
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
if not self.client.can_send_version(version):
+ if kw['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance", required="1.25")
+ else:
+ del kw['target_state']
+ version = '1.24'
+ if not self.client.can_send_version(version):
if kw['reimage_boot_volume']:
raise exception.NovaException(
'Conductor RPC version does not support '
diff --git a/nova/conf/api.py b/nova/conf/api.py
index 5c8a367e8e..58cbc4931e 100644
--- a/nova/conf/api.py
+++ b/nova/conf/api.py
@@ -225,8 +225,11 @@ service.
help="""
Domain name used to configure FQDN for instances.
-Configure a fully-qualified domain name for instance hostnames. If unset, only
-the hostname without a domain will be configured.
+Configure a fully-qualified domain name for instance hostnames. The value is
+suffixed to the instance hostname from the database to construct the hostname
+that appears in the metadata API. To disable this behavior (for example in
+order to correctly support the FQDN hostnames introduced by microversion
+2.94), set this to the empty string.
Possible values:
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 224c802935..de2743d850 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -1016,6 +1016,15 @@ Related options:
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
+ cfg.ListOpt('vmdk_allowed_types',
+ default=['streamOptimized', 'monolithicSparse'],
+ help="""
+A list of VMDK "create-type" subformats that will be allowed. It is
+recommended to include only single-file-with-sparse-header variants to avoid
+potential host file exposure due to processing named extents. If this list
+is empty, then no form of VMDK image will be allowed.
+"""),
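As a hedged illustration of how such an allow-list could be enforced (the helper below and its call site are assumptions for illustration, not the actual Nova inspection code path)::

    import nova.conf
    from nova import exception

    CONF = nova.conf.CONF

    def check_vmdk_create_type(image_id, create_type):
        # create_type is the VMDK descriptor's createType value, e.g.
        # 'streamOptimized'; reject anything not explicitly allowed.
        if create_type not in CONF.compute.vmdk_allowed_types:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason='Invalid VMDK create-type %s' % create_type)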
cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
default=False,
help="""
diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 16a3f63090..632bba40fb 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -1478,6 +1478,15 @@ Related options:
"""),
]
+libvirt_cpu_mgmt_opts = [
+ cfg.StrOpt('cpu_power_governor_low',
+ default='powersave',
+ help='Governor to use in order '
+ 'to reduce CPU power consumption'),
+ cfg.StrOpt('cpu_power_governor_high',
+ default='performance',
+               help='Governor to use in order to have the best '
+                    'CPU performance'),
+]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
@@ -1499,6 +1508,7 @@ ALL_OPTS = list(itertools.chain(
libvirt_volume_nvmeof_opts,
libvirt_pmem_opts,
libvirt_vtpm_opts,
+ libvirt_cpu_mgmt_opts,
))
diff --git a/nova/conf/pci.py b/nova/conf/pci.py
index 468ae9a3bd..533bf52ead 100644
--- a/nova/conf/pci.py
+++ b/nova/conf/pci.py
@@ -70,8 +70,9 @@ Possible Values:
``resource_class``
The optional Placement resource class name that is used
to track the requested PCI devices in Placement. It can be a standard
- resource class from the ``os-resource-classes`` lib. Or can be any string.
- In that case Nova will normalize it to a proper Placement resource class by
+ resource class from the ``os-resource-classes`` lib. Or it can be an
+    arbitrary string. If it is a non-standard resource class then Nova will
+ normalize it to a proper Placement resource class by
making it upper case, replacing any consecutive character outside of
``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if
not yet prefixed. The maximum allowed length is 255 character including the
@@ -79,19 +80,22 @@ Possible Values:
``vendor_id`` and ``product_id`` values of the alias in the form of
``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class`` requested
in the alias is matched against the ``resource_class`` defined in the
- ``[pci]device_spec``.
+    ``[pci]device_spec``. This field can only be used if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
``traits``
An optional comma separated list of Placement trait names requested to be
present on the resource provider that fulfills this alias. Each trait can
- be a standard trait from ``os-traits`` lib or can be any string. If it is
- not a standard trait then Nova will normalize the trait name by making it
- upper case, replacing any consecutive character outside of ``[A-Z0-9_]``
- with a single '_', and prefixing the name with ``CUSTOM_`` if not yet
- prefixed. The maximum allowed length of a trait name is 255 character
- including the prefix. Every trait in ``traits`` requested in the alias
- ensured to be in the list of traits provided in the ``traits`` field of
- the ``[pci]device_spec`` when scheduling the request.
+ be a standard trait from ``os-traits`` lib or it can be an arbitrary
+ string. If it is a non-standard trait then Nova will normalize the
+ trait name by making it upper case, replacing any consecutive character
+ outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name
+ with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a
+    trait name is 255 characters including the prefix. Every trait in
+    ``traits`` requested in the alias is ensured to be in the list of traits
+    provided in the ``traits`` field of the ``[pci]device_spec`` when
+    scheduling the request. This field can only be used if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
* Supports multiple aliases by repeating the option (not by specifying
a list value)::
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index 03e78fe701..c75bd07c5b 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -745,7 +745,26 @@ Possible values:
Related options:
* ``[filter_scheduler] aggregate_image_properties_isolation_namespace``
-""")]
+"""),
+ cfg.BoolOpt(
+ "pci_in_placement",
+ default=False,
+ help="""
+Enable scheduling and claiming PCI devices in Placement.
+
+This can be enabled after ``[pci]report_in_placement`` is enabled on all
+compute hosts.
+
+When enabled, the scheduler queries Placement about PCI device availability
+in order to select a destination for a server with PCI requests. The
+scheduler also allocates the selected PCI devices in Placement. Note that
+this logic does not replace the PCIPassthroughFilter but extends it.
+
+Related options:
+
+* ``[pci] report_in_placement``
+* ``[pci] alias``
+* ``[pci] device_spec``
+"""),
+]
metrics_group = cfg.OptGroup(
name="metrics",
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index 1116664d36..943ec74885 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -374,6 +374,28 @@ Related options:
* :oslo.config:option:`DEFAULT.compute_driver` (libvirt)
"""),
+ cfg.IntOpt('qemu_monitor_announce_self_count',
+ default=3,
+ min=1,
+ help="""
+The total number of times to send the announce_self command to the QEMU
+monitor when enable_qemu_monitor_announce_self is enabled.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
+ cfg.IntOpt('qemu_monitor_announce_self_interval',
+ default=1,
+ min=1,
+ help="""
+The number of seconds to wait before re-sending the announce_self
+command to the QEMU monitor.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
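A rough sketch of how these two options could drive the repeated announcements (``guest.announce_self()`` is an assumed helper wrapping the QMP command; the real plumbing lives in the libvirt driver)::

    import time

    import nova.conf

    CONF = nova.conf.CONF

    def announce_self(guest):
        # Re-send announce_self the configured number of times, sleeping
        # for the configured interval between attempts.
        for _ in range(CONF.workarounds.qemu_monitor_announce_self_count):
            guest.announce_self()
            time.sleep(CONF.workarounds.qemu_monitor_announce_self_interval)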
cfg.BoolOpt('disable_compute_service_check_for_ffu',
default=False,
help="""
@@ -410,6 +432,13 @@ with the destination host. When using QEMU >= 2.9 and libvirt >=
4.4.0, libvirt will do the correct thing with respect to checking CPU
compatibility on the destination host during live migration.
"""),
+ cfg.BoolOpt('skip_cpu_compare_at_startup',
+ default=False,
+ help="""
+This will skip the CPU comparison call at the startup of the Compute
+service and let libvirt handle it.
+"""),
+
cfg.BoolOpt(
'skip_hypervisor_version_check_on_lm',
default=False,
diff --git a/nova/exception.py b/nova/exception.py
index cebaea4bee..f5993e79f8 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1451,6 +1451,11 @@ class InstanceEvacuateNotSupported(Invalid):
msg_fmt = _('Instance evacuate is not supported.')
+class InstanceEvacuateNotSupportedTargetState(Invalid):
+ msg_fmt = _("Target state '%(target_state)s' for instance evacuate "
+ "is not supported.")
+
+
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
@@ -1479,6 +1484,11 @@ class UnsupportedRescueImage(Invalid):
msg_fmt = _("Requested rescue image '%(image)s' is not supported")
+class UnsupportedRPCVersion(Invalid):
+ msg_fmt = _("Unsupported RPC version for %(api)s. "
+ "Required >= %(required)s")
+
+
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
@@ -2496,3 +2506,18 @@ class PlacementPciMixedTraitsException(PlacementPciException):
class ReimageException(NovaException):
msg_fmt = _("Reimaging volume failed.")
+
+
+class InvalidNodeConfiguration(NovaException):
+ msg_fmt = _('Invalid node identity configuration: %(reason)s')
+
+
+class DuplicateRecord(NovaException):
+ msg_fmt = _('Unable to create duplicate record for %(target)s')
+
+
+class NotSupportedComputeForEvacuateV295(NotSupported):
+ msg_fmt = _("Starting to microversion 2.95, evacuate API will stop "
+ "instance on destination. To evacuate before upgrades are "
+ "complete please use an older microversion. Required version "
+ "for compute %(expected), current version %(currently)s")
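For illustration, the two format keys would be filled from the minimum nova-compute service version, roughly along these lines (sketch only; the threshold constant and the exact call site are assumptions)::

    from nova import exception
    from nova.objects import service as service_obj

    MIN_COMPUTE_FOR_TARGET_STATE = 66  # assumed threshold for illustration

    def check_evacuate_v295(ctxt):
        minver = service_obj.get_minimum_version_all_cells(
            ctxt, ['nova-compute'])
        if minver < MIN_COMPUTE_FOR_TARGET_STATE:
            raise exception.NotSupportedComputeForEvacuateV295(
                expected=MIN_COMPUTE_FOR_TARGET_STATE, currently=minver)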
diff --git a/nova/filesystem.py b/nova/filesystem.py
new file mode 100644
index 0000000000..5394d2d835
--- /dev/null
+++ b/nova/filesystem.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to address filesystem calls, particularly sysfs."""
+
+import os
+
+from oslo_log import log as logging
+
+from nova import exception
+
+LOG = logging.getLogger(__name__)
+
+
+SYS = '/sys'
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+def read_sys(path: str) -> str:
+ """Reads the content of a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :returns: contents of that file.
+ :raises: nova.exception.FileNotFound if we can't read that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='r') as data:
+ return data.read()
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+# In order to correctly use it, you need to decorate the caller with a specific
+# privsep entrypoint.
+def write_sys(path: str, data: str) -> None:
+    """Writes data to a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :param data: the data to write.
+    :returns: None.
+ :raises: nova.exception.FileNotFound if we can't write that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='w') as fd:
+ fd.write(data)
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
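For context, a small usage sketch of these helpers (the sysfs path and the wiring are illustrative; as noted above, a real caller of ``write_sys`` must be decorated with an appropriate privsep entrypoint)::

    from nova import filesystem

    def set_cpu_governor(cpu_id: int, governor: str) -> None:
        # e.g. governor could come from CONF.libvirt.cpu_power_governor_low
        path = 'devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu_id
        if filesystem.read_sys(path).strip() != governor:
            filesystem.write_sys(path, governor)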
diff --git a/nova/manager.py b/nova/manager.py
index 9c00401b96..df03305367 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -103,12 +103,15 @@ class Manager(PeriodicTasks, metaclass=ManagerMeta):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
- def init_host(self):
+ def init_host(self, service_ref):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
- is created.
+ is created, but if one already exists for this service, it is
+ provided.
Child classes should override this method.
+
+ :param service_ref: An objects.Service if one exists, else None.
"""
pass
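A minimal sketch of a manager honoring the updated hook signature (the subclass is hypothetical; ComputeManager above is the real consumer of ``service_ref``)::

    from oslo_log import log as logging

    from nova import manager

    LOG = logging.getLogger(__name__)


    class ExampleManager(manager.Manager):
        def init_host(self, service_ref):
            # service_ref is None on the very first start of this service
            # on this host; otherwise it is the existing Service record.
            if service_ref is not None:
                LOG.debug('Restarting with service version %s',
                          service_ref.version)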
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 60c2be71cd..dfc1b2ae28 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -339,7 +340,12 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
- db_compute = db.compute_node_create(self._context, updates)
+ try:
+ db_compute = db.compute_node_create(self._context, updates)
+ except db_exc.DBDuplicateEntry:
+ target = 'compute node %s:%s' % (updates['hypervisor_hostname'],
+ updates['uuid'])
+ raise exception.DuplicateRecord(target=target)
self._from_db_object(self._context, self, db_compute)
@base.remotable
@@ -388,8 +394,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# The uuid field is read-only so it should only be set when
# creating the compute node record for the first time. Ignore
# it otherwise.
- if key == 'uuid' and 'uuid' in self:
- continue
+ if (key == 'uuid' and 'uuid' in self and
+ resources[key] != self.uuid):
+ raise exception.InvalidNodeConfiguration(
+ reason='Attempt to overwrite node %s with %s!' % (
+ self.uuid, resources[key]))
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index 6443cff8ca..a4ca77edf6 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -22,6 +22,7 @@ from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.compute import pci_placement_translator
+import nova.conf
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova import exception
@@ -30,6 +31,7 @@ from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
@@ -487,16 +489,8 @@ class RequestSpec(base.NovaObject):
def _traits_from_request(spec: ty.Dict[str, ty.Any]) -> ty.Set[str]:
return pci_placement_translator.get_traits(spec.get("traits", ""))
- # This is here temporarily until the PCI placement scheduling is under
- # implementation. When that is done there will be a config option
- # [scheduler]pci_in_placement to configure this. Now we add this as a
- # function to allow tests to selectively enable the WIP feature
- @staticmethod
- def _pci_in_placement_enabled():
- return False
-
def generate_request_groups_from_pci_requests(self):
- if not self._pci_in_placement_enabled():
+ if not CONF.filter_scheduler.pci_in_placement:
return False
for pci_request in self.pci_requests.requests:
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 71361e0168..0ed443ef17 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 64
+SERVICE_VERSION = 66
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -225,8 +225,18 @@ SERVICE_VERSION_HISTORY = (
# Version 64: Compute RPC v6.1:
# Add reimage_boot_volume parameter to rebuild_instance()
{'compute_rpc': '6.1'},
+ # Version 65: Compute RPC v6.1:
+ # Added stable local node identity
+ {'compute_rpc': '6.1'},
+ # Version 66: Compute RPC v6.2:
+ # Add target_state parameter to rebuild_instance()
+ {'compute_rpc': '6.2'},
)
+# This is the version after which we can rely on having a persistent
+# local node identity for single-node systems.
+NODE_IDENTITY_VERSION = 65
+
# This is used to raise an error at service startup if older than N-1 computes
# are detected. Update this at the beginning of every release cycle to point to
# the smallest service version that was added in N-1.
diff --git a/nova/pci/stats.py b/nova/pci/stats.py
index fab91c055a..5c5f7c669c 100644
--- a/nova/pci/stats.py
+++ b/nova/pci/stats.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
import copy
import typing as ty
@@ -68,11 +68,15 @@ class PciDeviceStats(object):
# the PCI alias, but they are matched by the placement
# allocation_candidates query, so we can ignore them during pool creation
# and during filtering here
- ignored_tags = ['resource_class', 'traits']
- # these are metadata keys in the pool and in the request that are matched
+ ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits']
+    # 'rp_uuids' is a metadata key in the spec that is matched specially
+    # in _filter_pools_based_on_placement_allocation, so we can ignore it
+    # in the general matching logic. Note the use of '+' instead of '+=' so
+    # that the two names above stop referring to the same list object.
+    ignored_spec_tags = ignored_spec_tags + ['rp_uuids']
+    # 'rp_uuid' is a metadata key in the pool that is matched specially
+    # in _filter_pools_based_on_placement_allocation, so we can ignore it
+    # in the general matching logic.
- ignored_tags += ['rp_uuid', 'rp_uuids']
+    ignored_pool_tags = ignored_pool_tags + ['rp_uuid']
def __init__(
self,
@@ -100,8 +104,6 @@ class PciDeviceStats(object):
pool_keys = pool.copy()
del pool_keys['count']
del pool_keys['devices']
- # FIXME(gibi): do we need this?
- pool_keys.pop('rp_uuid', None)
if (len(pool_keys.keys()) == len(dev_pool.keys()) and
self._equal_properties(dev_pool, pool_keys, list(dev_pool))):
return pool
@@ -148,15 +150,18 @@ class PciDeviceStats(object):
if tags:
pool.update(
- {k: v for k, v in tags.items() if k not in self.ignored_tags}
+ {
+ k: v
+ for k, v in tags.items()
+ if k not in self.ignored_pool_tags
+ }
)
# NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a
# single RP and the scheduler allocates from a specific RP we need
# to split the pools by PCI or PF address. We can still keep
# the VFs from the same parent PF in a single pool though as they
# are equivalent from placement perspective.
- address = dev.parent_addr or dev.address
- pool['address'] = address
+ pool['address'] = dev.parent_addr or dev.address
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
@@ -246,6 +251,17 @@ class PciDeviceStats(object):
free_devs.extend(pool['devices'])
return free_devs
+ def _allocate_devs(
+ self, pool: Pool, num: int, request_id: str
+ ) -> ty.List["objects.PciDevice"]:
+ alloc_devices = []
+ for _ in range(num):
+ pci_dev = pool['devices'].pop()
+ self._handle_device_dependents(pci_dev)
+ pci_dev.request_id = request_id
+ alloc_devices.append(pci_dev)
+ return alloc_devices
+
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
@@ -276,20 +292,29 @@ class PciDeviceStats(object):
self.add_device(alloc_devices.pop())
raise exception.PciDeviceRequestFailed(requests=pci_requests)
- for pool in pools:
- if pool['count'] >= count:
- num_alloc = count
- else:
- num_alloc = pool['count']
- count -= num_alloc
- pool['count'] -= num_alloc
- for d in range(num_alloc):
- pci_dev = pool['devices'].pop()
- self._handle_device_dependents(pci_dev)
- pci_dev.request_id = request.request_id
- alloc_devices.append(pci_dev)
- if count == 0:
- break
+ if not rp_uuids:
+ # if there is no placement allocation then we are free to
+ # consume from the pools in any order:
+ for pool in pools:
+ if pool['count'] >= count:
+ num_alloc = count
+ else:
+ num_alloc = pool['count']
+ count -= num_alloc
+ pool['count'] -= num_alloc
+ alloc_devices += self._allocate_devs(
+ pool, num_alloc, request.request_id)
+ if count == 0:
+ break
+ else:
+ # but if there is placement allocation then we have to follow
+ # it
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ alloc_devices += self._allocate_devs(
+ pool, count, request.request_id)
return alloc_devices
@@ -341,7 +366,9 @@ class PciDeviceStats(object):
def ignore_keys(spec):
return {
- k: v for k, v in spec.items() if k not in self.ignored_tags
+ k: v
+ for k, v in spec.items()
+ if k not in self.ignored_spec_tags
}
request_specs = [ignore_keys(spec) for spec in request.spec]
@@ -545,36 +572,38 @@ class PciDeviceStats(object):
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
- rp_uuids: ty.Set[str],
+ rp_uuids: ty.List[str],
) -> ty.List[Pool]:
if not rp_uuids:
# If there is no placement allocation then we don't need to filter
# by it. This could happen if the instance only has neutron port
# based InstancePCIRequest as that is currently not having
# placement allocation (except for QoS ports, but that handled in a
- # separate codepath) or if the [scheduler]pci_in_placement
+ # separate codepath) or if the [filter_scheduler]pci_in_placement
# configuration option is not enabled in the scheduler.
return pools
+ requested_dev_count_per_rp = collections.Counter(rp_uuids)
matching_pools = []
for pool in pools:
rp_uuid = pool.get('rp_uuid')
if rp_uuid is None:
- # NOTE(gibi): There can be pools without rp_uuid field if the
- # [pci]report_in_placement is not enabled for a compute with
- # viable PCI devices. We have a non-empty rp_uuids, so we know
- # that the [scheduler]pci_in_placement is enabled. This is a
- # configuration error.
- LOG.warning(
- "The PCI pool %s isn't mapped to an RP UUID but the "
- "scheduler is configured to create PCI allocations in "
- "placement. This should not happen. Please enable "
- "[pci]report_in_placement on all compute hosts before "
- "enabling [scheduler]pci_in_placement in the scheduler. "
- "This pool is ignored now.", pool)
+ # NOTE(gibi): As rp_uuids is not empty the scheduler allocated
+ # PCI resources on this host, so we know that
+ # [pci]report_in_placement is enabled on this host. But this
+ # pool has no RP mapping which can only happen if the pool
+            # contains PCI devices with a physical_network tag, as those
+            # devices are not yet reported in placement. But if they are not
+ # reported then we can ignore them here too.
continue
- if rp_uuid in rp_uuids:
+ if (
+ # the placement allocation contains this pool
+ rp_uuid in requested_dev_count_per_rp and
+                # the number of devices allocated in placement can be
+                # consumed from the pool
+ pool["count"] >= requested_dev_count_per_rp[rp_uuid]
+ ):
matching_pools.append(pool)
return matching_pools
@@ -584,7 +613,7 @@ class PciDeviceStats(object):
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
- rp_uuids: ty.Set[str],
+ rp_uuids: ty.List[str],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
@@ -753,7 +782,7 @@ class PciDeviceStats(object):
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
- rp_uuids: ty.Set[str],
+ rp_uuids: ty.List[str],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
@@ -784,11 +813,22 @@ class PciDeviceStats(object):
if not filtered_pools:
return False
- count = request.count
- for pool in filtered_pools:
- count = self._decrease_pool_count(pools, pool, count)
- if not count:
- break
+ if not rp_uuids:
+ # If there is no placement allocation for this request then we are
+ # free to consume from the filtered pools in any order
+ count = request.count
+ for pool in filtered_pools:
+ count = self._decrease_pool_count(pools, pool, count)
+ if not count:
+ break
+ else:
+ # but if there is placement allocation then we have to follow that
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in filtered_pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ if pool['count'] == 0:
+ pools.remove(pool)
return True
@@ -796,38 +836,43 @@ class PciDeviceStats(object):
self,
provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
request: 'objects.InstancePCIRequest'
- ) -> ty.Set[str]:
- """Return the list of RP uuids that are fulfilling the request"""
+ ) -> ty.List[str]:
+ """Return the list of RP uuids that are fulfilling the request.
+
+ An RP will be in the list as many times as many devices needs to
+ be allocated from that RP.
+ """
if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
# TODO(gibi): support neutron based requests in a later cycle
- # set() will signal that any PCI pool can be used for this request
- return set()
+ # an empty list will signal that any PCI pool can be used for this
+ # request
+ return []
if not provider_mapping:
# NOTE(gibi): AFAIK specs is always a list of a single dict
# but the object is hard to change retroactively
rp_uuids = request.spec[0].get('rp_uuids')
if not rp_uuids:
- # This can happen if [scheduler]pci_in_placement is not
+ # This can happen if [filter_scheduler]pci_in_placement is not
# enabled yet
- # set() will signal that any PCI pool can be used for this
- # request
- return set()
+ # An empty list will signal that any PCI pool can be used for
+ # this request
+ return []
# TODO(gibi): this is baaad but spec is a dict of string so
# the list is serialized
- return set(rp_uuids.split(','))
+ return rp_uuids.split(',')
# NOTE(gibi): the PCI prefilter generates RequestGroup suffixes from
# InstancePCIRequests in the form of {request_id}-{count_index}
# NOTE(gibi): a suffixed request group always fulfilled from a single
# RP
- return {
+ return [
rp_uuids[0]
for group_id, rp_uuids in provider_mapping.items()
if group_id.startswith(request.request_id)
- }
+ ]
def apply_requests(
self,
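To make the duplicated-RP semantics concrete, a small worked example (values invented) of the Counter that drives per-pool consumption in ``consume_requests()`` and ``_apply_request()``::

    import collections

    # _get_rp_uuids() returning the same RP twice means two devices must be
    # taken from the pool mapped to that RP.
    rp_uuids = ['rp-a', 'rp-a', 'rp-b']
    requested_devs_per_pool_rp = collections.Counter(rp_uuids)
    # Counter({'rp-a': 2, 'rp-b': 1}): consume two devices from the pool
    # whose rp_uuid is 'rp-a' and one from the pool whose rp_uuid is 'rp-b'.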
diff --git a/nova/policies/tenant_networks.py b/nova/policies/tenant_networks.py
index ee5bd66cdf..79f8d21eaa 100644
--- a/nova/policies/tenant_networks.py
+++ b/nova/policies/tenant_networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
tenant_networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List project networks.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -52,7 +52,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show project network details.
This API is proxy calls to the Network service. This is deprecated.""",
diff --git a/nova/policy.py b/nova/policy.py
index 55455a9271..c66489cc8d 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -41,11 +41,15 @@ USER_BASED_RESOURCES = ['os-keypairs']
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(gmann): Remove overriding the default value of config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
diff --git a/nova/rpc.py b/nova/rpc.py
index a32b920e06..7a92650414 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -204,11 +204,9 @@ def get_client(target, version_cap=None, serializer=None,
else:
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(TRANSPORT, target,
+ version_cap=version_cap, serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def get_server(target, endpoints, serializer=None):
@@ -436,9 +434,9 @@ class ClientRouter(periodic_task.PeriodicTasks):
transport = context.mq_connection
if transport:
cmt = self.default_client.call_monitor_timeout
- return messaging.RPCClient(transport, self.target,
- version_cap=self.version_cap,
- serializer=self.serializer,
- call_monitor_timeout=cmt)
+ return messaging.get_rpc_client(transport, self.target,
+ version_cap=self.version_cap,
+ serializer=self.serializer,
+ call_monitor_timeout=cmt)
else:
return self.default_client
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 74e24b7bc3..785a13279e 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -16,8 +16,12 @@
"""
Scheduler host filters
"""
+from oslo_log import log as logging
+
from nova import filters
+LOG = logging.getLogger(__name__)
+
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
@@ -53,6 +57,43 @@ class BaseHostFilter(filters.BaseFilter):
raise NotImplementedError()
+class CandidateFilterMixin:
+    """Mixin that helps to implement a filter that needs to filter hosts by
+    Placement allocation candidates.
+ """
+
+    def filter_candidates(self, host_state, filter_func):
+        """Check the still viable allocation candidates with filter_func and
+        keep only those that pass it.
+
+        :param host_state: HostState object holding the list of still viable
+            allocation candidates
+        :param filter_func: A callable that takes an allocation candidate and
+            returns a truthy value if the candidate passes the filter or a
+            falsy value if it does not.
+        :returns: The list of accepted allocation candidates.
+ """
+ good_candidates = []
+ for candidate in host_state.allocation_candidates:
+ LOG.debug(
+ f'{self.__class__.__name__} tries allocation candidate: '
+ f'{candidate}',
+ )
+ if filter_func(candidate):
+ LOG.debug(
+ f'{self.__class__.__name__} accepted allocation '
+ f'candidate: {candidate}',
+ )
+ good_candidates.append(candidate)
+ else:
+ LOG.debug(
+ f'{self.__class__.__name__} rejected allocation '
+ f'candidate: {candidate}',
+ )
+
+ host_state.allocation_candidates = good_candidates
+ return good_candidates
+
+
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
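As a usage illustration, a hypothetical filter built on the new mixin could look like this (the real adopters, NUMATopologyFilter and PciPassthroughFilter, follow below)::

    from nova.scheduler import filters


    class ExampleCandidateFilter(filters.BaseHostFilter,
                                 filters.CandidateFilterMixin):
        RUN_ON_REBUILD = False

        def host_passes(self, host_state, spec_obj):
            good_candidates = self.filter_candidates(
                host_state,
                # keep only candidates that carry a provider mapping
                lambda candidate: bool(candidate.get('mappings')),
            )
            return bool(good_candidates)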
diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py
index 7ec9ca5648..ae50db90e5 100644
--- a/nova/scheduler/filters/numa_topology_filter.py
+++ b/nova/scheduler/filters/numa_topology_filter.py
@@ -20,7 +20,10 @@ from nova.virt import hardware
LOG = logging.getLogger(__name__)
-class NUMATopologyFilter(filters.BaseHostFilter):
+class NUMATopologyFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Filter on requested NUMA topology."""
# NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
@@ -97,34 +100,19 @@ class NUMATopologyFilter(filters.BaseHostFilter):
if network_metadata:
limits.network_metadata = network_metadata
- good_candidates = []
- for candidate in host_state.allocation_candidates:
- LOG.debug(
- 'NUMATopologyFilter tries allocation candidate: %s, %s',
- candidate, requested_topology
- )
- instance_topology = (hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
limits=limits,
pci_requests=pci_requests,
pci_stats=host_state.pci_stats,
- provider_mapping=candidate['mappings'],
- ))
- if instance_topology:
- LOG.debug(
- 'NUMATopologyFilter accepted allocation candidate: %s',
- candidate
- )
- good_candidates.append(candidate)
- else:
- LOG.debug(
- 'NUMATopologyFilter rejected allocation candidate: %s',
- candidate
- )
-
- host_state.allocation_candidates = good_candidates
-
- if not host_state.allocation_candidates:
+ provider_mapping=candidate["mappings"],
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host)s, %(node)s fails NUMA topology "
"requirements. The instance does not fit on this "
"host.", {'host': host_state.host,
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index 36f0b5901c..992879072a 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -20,7 +20,10 @@ from nova.scheduler import filters
LOG = logging.getLogger(__name__)
-class PciPassthroughFilter(filters.BaseHostFilter):
+class PciPassthroughFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Pci Passthrough Filter based on PCI request
Filter that schedules instances on a host if the host has devices
@@ -54,28 +57,12 @@ class PciPassthroughFilter(filters.BaseHostFilter):
{'host_state': host_state, 'requests': pci_requests})
return False
- good_candidates = []
- for candidate in host_state.allocation_candidates:
- LOG.debug(
- 'PciPassthroughFilter tries allocation candidate: %s',
- candidate
- )
- if host_state.pci_stats.support_requests(
- pci_requests.requests,
- provider_mapping=candidate['mappings']
- ):
- LOG.debug(
- 'PciPassthroughFilter accepted allocation candidate: %s',
- candidate
- )
- good_candidates.append(candidate)
- else:
- LOG.debug(
- 'PciPassthroughFilter rejected allocation candidate: %s',
- candidate
- )
-
- host_state.allocation_candidates = good_candidates
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: host_state.pci_stats.support_requests(
+ pci_requests.requests, provider_mapping=candidate["mappings"]
+ ),
+ )
if not good_candidates:
LOG.debug("%(host_state)s doesn't have the required PCI devices"
diff --git a/nova/service.py b/nova/service.py
index 2c10224926..bd3b49ae66 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -156,11 +156,11 @@ class Service(service.Service):
LOG.info('Starting %(topic)s node (version %(version)s)',
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
- self.manager.init_host()
- self.model_disconnected = False
ctxt = context.get_admin_context()
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
+ self.manager.init_host(self.service_ref)
+ self.model_disconnected = False
if self.service_ref:
_update_service_ref(self.service_ref)
diff --git a/nova/test.py b/nova/test.py
index 689d5ba291..0f7965ea33 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -171,6 +171,12 @@ class TestCase(base.BaseTestCase):
# base class when USES_DB is True.
NUMBER_OF_CELLS = 1
+ # The stable compute id stuff is intentionally singleton-ish, which makes
+ # it a nightmare for testing multiple host/node combinations in tests like
+ # we do. So, mock it out by default, unless the test is specifically
+ # designed to handle it.
+ STUB_COMPUTE_ID = True
+
def setUp(self):
"""Run before each test method to initialize test environment."""
# Ensure BaseTestCase's ConfigureLogging fixture is disabled since
@@ -288,6 +294,10 @@ class TestCase(base.BaseTestCase):
self.useFixture(nova_fixtures.GenericPoisonFixture())
self.useFixture(nova_fixtures.SysFsPoisonFixture())
+ # Additional module names can be added to this set if needed
+ self.useFixture(nova_fixtures.ImportModulePoisonFixture(
+ set(['guestfs', 'libvirt'])))
+
# make sure that the wsgi app is fully initialized for all testcase
# instead of only once initialized for test worker
wsgi_app.init_global_data.reset()
@@ -295,6 +305,11 @@ class TestCase(base.BaseTestCase):
# Reset the placement client singleton
report.PLACEMENTCLIENT = None
+ # Reset our local node uuid cache (and avoid writing to the
+ # local filesystem when we generate a new one).
+ if self.STUB_COMPUTE_ID:
+ self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -683,6 +698,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
raise NotImplementedError()
def setUp(self):
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
self.base = self._get_base_class()
super(SubclassSignatureTestCase, self).setUp()
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 5d20e7d54f..4f48463118 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -2044,6 +2044,12 @@ class Connection(object):
return VIR_CPU_COMPARE_IDENTICAL
+ def compareHypervisorCPU(
+ self, emulator, arch, machine, virttype,
+ xml, flags
+ ):
+ return self.compareCPU(xml, flags)
+
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000,
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index ea32b6b34a..4ce3f03710 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -190,6 +190,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
+ # Set the SUPPORTS_LUKS member variable to mimic the Image base
+ # class.
+ image_init.SUPPORTS_LUKS = False
# Ditto for the 'is_shared_block_storage' and
# 'is_file_in_instance_path' functions
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 129b2f9abb..9a652c02cb 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -20,8 +20,10 @@ import collections
import contextlib
from contextlib import contextmanager
import functools
+from importlib.abc import MetaPathFinder
import logging as std_logging
import os
+import sys
import time
from unittest import mock
import warnings
@@ -63,6 +65,7 @@ from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
+from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -563,11 +566,10 @@ class CellDatabases(fixtures.Fixture):
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
- return messaging.RPCClient(rpc.TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(rpc.TRANSPORT, target,
+ version_cap=version_cap,
+ serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
@@ -1798,3 +1800,96 @@ class SysFsPoisonFixture(fixtures.Fixture):
# a bunch of test to fail
# self.inject_poison("os.path", "exists")
# self.inject_poison("os", "stat")
+
+
+class ImportModulePoisonFixture(fixtures.Fixture):
+ """Poison imports of modules unsuitable for the test environment.
+
+ Examples are guestfs and libvirt. Ordinarily, these would not be installed
+ in the test environment but if they _are_ present, it can result in
+ actual calls to libvirt, for example, which could cause tests to fail.
+
+ This fixture will inspect module imports and if they are in the disallowed
+ list, it will fail the test with a helpful message about mocking needed in
+ the test.
+ """
+
+ class ForbiddenModules(MetaPathFinder):
+ def __init__(self, test, modules):
+ super().__init__()
+ self.test = test
+ self.modules = modules
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.modules:
+ current = eventlet.getcurrent()
+ # NOTE(gibi) not all eventlet spawn is under our control, so
+ # there can be senders without test_case_id set, find the first
+ # ancestor that was spawned from nova.utils.spawn[_n] and
+ # therefore has the id set.
+ while (
+ current is not None and
+ not getattr(current, 'test_case_id', None)
+ ):
+ current = current.parent
+
+ if current is not None:
+ self.test.tc_id = current.test_case_id
+ LOG.warning(
+ "!!!---!!! TestCase ID %s hit the import poison while "
+ "importing %s. If you see this in a failed functional "
+ "test then please let #openstack-nova on IRC know "
+ "about it. !!!---!!!", current.test_case_id, fullname)
+ self.test.fail_message = (
+ f"This test imports the '{fullname}' module, which it "
+ f'should not in the test environment. Please add '
+ f'appropriate mocking to this test.'
+ )
+ raise ImportError(fullname)
+
+ def __init__(self, module_names):
+ self.module_names = module_names
+ self.fail_message = ''
+ self.tc_id = None
+ if isinstance(module_names, str):
+ self.module_names = {module_names}
+ self.meta_path_finder = self.ForbiddenModules(self, self.module_names)
+
+ def setUp(self):
+ super().setUp()
+ self.addCleanup(self.cleanup)
+ sys.meta_path.insert(0, self.meta_path_finder)
+
+ def cleanup(self):
+ sys.meta_path.remove(self.meta_path_finder)
+ # We use a flag and check it during the cleanup phase to fail the test
+ # if needed. This is done because some module imports occur inside of a
+ # try-except block that ignores all exceptions, so raising an exception
+ # there (which is also what self.assert* and self.fail() do underneath)
+ # will not work to cause a failure in the test.
+ if self.fail_message:
+ if self.tc_id is not None:
+ LOG.warning(
+ "!!!---!!! TestCase ID %s hit the import poison. If you "
+ "see this in a failed functional test then please let "
+ "#openstack-nova on IRC know about it. !!!---!!!",
+ self.tc_id
+ )
+ raise ImportError(self.fail_message)
+
+
+class ComputeNodeIdFixture(fixtures.Fixture):
+ def setUp(self):
+ super().setUp()
+
+ node.LOCAL_NODE_UUID = None
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ lambda: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ lambda uuid: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..486433733d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..3becc83fba
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "hostname": "%(hostname)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
new file mode 100644
index 0000000000..f83c78fdc9
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "%(user_data)s",
+ "networks": "auto",
+ "hostname": "custom-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
new file mode 100644
index 0000000000..ae2088619a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
new file mode 100644
index 0000000000..bc4be64a8e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "new-server-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
new file mode 100644
index 0000000000..2adc16df5e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..f49d21e7a2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/detail?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..9cdb3aa644
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index ab3aa95ad8..15efb39d44 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -80,7 +80,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -97,7 +97,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -119,7 +119,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -135,7 +135,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -163,7 +163,7 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -184,7 +184,7 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -211,8 +211,46 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
pass
+
+
+class EvacuateJsonTestV295(EvacuateJsonTestV268):
+ microversion = '2.95'
+ scenarios = [('v2_95', {'api_major_version': 'v2.1'})]
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index aa07b88247..7679c9b734 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -618,6 +618,13 @@ class ServersSampleJson290Test(ServersSampleJsonTest):
ADMIN_API = False
+class ServersSampleJson294Test(ServersSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ use_common_server_post = False
+ ADMIN_API = False
+
+
class ServersUpdateSampleJsonTest(ServersSampleBase):
# Many of the 'os_compute_api:servers:*' policies are admin-only, and we
@@ -702,6 +709,44 @@ class ServersUpdateSampleJson290Test(ServersUpdateSampleJsonTest):
self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+class ServersUpdateSampleJson294Test(ServersUpdateSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ ADMIN_API = False
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ subs['hostname'] = 'updated-hostname.example.com'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ params = {
+ 'uuid': self.glance.auto_disk_config_enabled_image['id'],
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ 'hostname': 'updated-hostname.example.com',
+ }
+
+ resp = self._do_post(
+ 'servers/%s/action' % uuid,
+ 'server-action-rebuild',
+ params,
+ )
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index fff08697ae..139fb5e6ac 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -248,6 +248,7 @@ class IronicResourceTrackerTest(test.TestCase):
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
+ 'uuid': str(getattr(uuids, nodename)),
}
self.rt.update_available_resource(self.ctx, nodename)
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index ca6fd48e8c..cdf71da0d4 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -598,7 +598,7 @@ class InstanceHelperMixin:
def _evacuate_server(
self, server, extra_post_args=None, expected_host=None,
- expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_state='SHUTOFF', expected_task_state=NOT_SPECIFIED,
expected_migration_status='done'):
"""Evacuate a server."""
api = getattr(self, 'admin_api', self.api)
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index 47a8bbe81c..7b6ee10631 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -18,6 +18,7 @@ import io
from unittest import mock
import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova.tests import fixtures as nova_fixtures
@@ -177,7 +178,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
- self.computes[hostname] = _start_compute(hostname, host_info)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ self.computes[hostname] = _start_compute(hostname, host_info)
+ # We need to trigger libvirt.Host() to capture the node-local
+ # uuid while we have it mocked out.
+ self.computes[hostname].driver._host.get_node_uuid()
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 9d3deec99d..92d7ffba29 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -415,7 +415,9 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
with mock.patch.object(fakelibvirt.Connection, 'getHostname',
return_value=name):
- compute = self.start_service('compute', host=name)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % name))
+ compute = self.start_service('compute', host=name)
compute.driver._host.get_connection().getHostname = lambda: name
return compute
diff --git a/nova/tests/functional/libvirt/test_pci_in_placement.py b/nova/tests/functional/libvirt/test_pci_in_placement.py
index 136bbd7f67..41d6c8e008 100644
--- a/nova/tests/functional/libvirt/test_pci_in_placement.py
+++ b/nova/tests/functional/libvirt/test_pci_in_placement.py
@@ -13,6 +13,7 @@
# under the License.
from unittest import mock
+import ddt
import fixtures
import os_resource_classes
import os_traits
@@ -87,7 +88,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=2, num_pfs=2, num_vfs=4)
# the emulated devices will then be filtered by the device_spec:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PCI_PROD_ID will match two type-PCI devs (slot 0, 1)
{
@@ -164,7 +165,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=1, num_vfs=1)
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PCI_PROD_ID will match the type-PCI in slot 0
{
@@ -211,7 +212,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# both device will be matched by our config
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PF
{
@@ -244,7 +245,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches the two VFs separately and tries to configure
# them with different resource class
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.1",
@@ -278,7 +279,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches the two VFs separately and tries to configure
# them with different trait list
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.1",
@@ -312,7 +313,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# then the config assigns physnet to the dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -332,7 +333,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
)
def test_devname_based_dev_spec_rejected(self):
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"devname": "eth0",
@@ -360,7 +361,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches that PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -382,7 +383,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
)
# now un-configure the PCI device and restart the compute
- self.flags(group='pci', device_spec=self._to_device_spec_conf([]))
+ self.flags(group='pci', device_spec=self._to_list_of_json_str([]))
self.restart_compute_service(hostname="compute1")
# the RP had no allocation so nova could remove it
@@ -398,7 +399,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config matching the VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -446,7 +447,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config patches the VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -489,7 +490,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config matches both VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -512,7 +513,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# change the config to match the PF but do not match the VFs and
# restart the compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -544,7 +545,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config only matches the PF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -567,7 +568,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# remove the PF from the config and add the VFs instead then restart
# the compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -600,7 +601,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=2, num_vfs=4)
# from slot 0 we match the PF only and ignore the VFs
# from slot 1 we match the VFs but ignore the parent PF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -645,7 +646,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# change the resource class and traits configuration and restart the
# compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"product_id": fakelibvirt.PF_PROD_ID,
@@ -698,7 +699,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# we match the PF only and ignore the VF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -752,7 +753,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
self._create_one_compute_with_a_pf_consumed_by_an_instance())
# remove 0000:81:00.0 from the device spec and restart the compute
- device_spec = self._to_device_spec_conf([])
+ device_spec = self._to_list_of_json_str([])
self.flags(group='pci', device_spec=device_spec)
# The PF is used but removed from the config. The PciTracker warns
# but keeps the device so the placement logic mimic this and only warns
@@ -796,7 +797,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# in the config, then restart the compute service
# only match the VF now
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -871,7 +872,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -898,7 +899,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -955,7 +956,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1011,7 +1012,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=2, num_vfs=8)
# the config matches:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# both type-PCI
{
@@ -1111,7 +1112,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=2, num_vfs=8)
# the config matches:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# both type-PCI
{
@@ -1200,7 +1201,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1257,7 +1258,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# the config matches just the VFs
- compute2_device_spec = self._to_device_spec_conf(
+ compute2_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1361,7 +1362,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1419,7 +1420,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=1, num_vfs=1)
# the config matches the PCI devs and hte PF but not the VFs
- compute2_device_spec = self._to_device_spec_conf(
+ compute2_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1500,7 +1501,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=3)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1597,32 +1598,33 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_expected_placement_view["allocations"][server["id"]] = {
"0000:81:00.0": {self.VF_RC: 2}
}
- self.assert_placement_pci_view(
- "compute1", **compute1_expected_placement_view)
+ # NOTE(gibi): This is unfortunate but during same host resize
+ # confirm when the PCI scheduling is not enabled the healing logic
+ # cannot heal the dest host allocation during the claim. It will only
+ # heal it in the next run of the ResourceTracker._update(). This due
+ # to the fact that ResourceTracker.drop_move_claim runs both for
+ # revert (on the dest) and confirm (on the source) and in same host
+ # resize this means that it runs on both the source and the dest as
+ # they are the same.
+ # Anyhow the healing will happen just a bit later. And the end goal is
+ # to make the scheduler support enabled by default and delete the
+ # whole healing logic. So I think this is acceptable.
self._run_periodics()
self.assert_placement_pci_view(
"compute1", **compute1_expected_placement_view)
-class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+@ddt.ddt
+class SimpleRCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
def setUp(self):
super().setUp()
- # TODO(gibi): replace this with setting the [scheduler]pci_in_placement
- # confing to True once that config is added
- self.mock_pci_in_placement_enabled = self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.request_spec.RequestSpec.'
- '_pci_in_placement_enabled',
- return_value=True
- )
- ).mock
+ self.flags(group='filter_scheduler', pci_in_placement=True)
- def test_boot_with_custom_rc_and_traits(self):
# The fake libvirt will emulate on the host:
# * one type-PCI in slot 0
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.0",
@@ -1641,7 +1643,7 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
self.start_compute(hostname="compute1", pci_info=pci_info)
self.assertPCIDeviceCounts("compute1", total=1, free=1)
- compute1_expected_placement_view = {
+ self.compute1_expected_placement_view = {
"inventories": {
"0000:81:00.0": {"CUSTOM_GPU": 1},
},
@@ -1658,82 +1660,82 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
"allocations": {},
}
self.assert_placement_pci_view(
- "compute1", **compute1_expected_placement_view)
+ "compute1", **self.compute1_expected_placement_view)
- pci_alias_wrong_rc = {
+ @ddt.data(
+ {
"vendor_id": fakelibvirt.PCI_VEND_ID,
"product_id": fakelibvirt.PCI_PROD_ID,
"name": "a-gpu-wrong-rc",
- }
- pci_alias_wrong_rc_2 = {
+ },
+ {
"resource_class": os_resource_classes.PGPU,
"name": "a-gpu-wrong-rc-2",
- }
- pci_alias_asking_for_missing_trait = {
+ },
+ {
"resource_class": "GPU",
# NOTE(gibi): "big" is missing from device spec
"traits": "purple,big",
"name": "a-gpu-missing-trait",
- }
+ },
+ )
+ def test_boot_with_custom_rc_and_traits_no_matching_device(
+ self, pci_alias
+ ):
+ self.flags(group="pci", alias=self._to_list_of_json_str([pci_alias]))
+ extra_spec = {"pci_passthrough:alias": f"{pci_alias['name']}:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state="ERROR"
+ )
+ self.assertIn("fault", server)
+ self.assertIn("No valid host", server["fault"]["message"])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+
+ def test_boot_with_custom_rc_and_traits_succeeds(self):
pci_alias_gpu = {
"resource_class": "GPU",
"traits": "HW_GPU_API_VULKAN,PURPLE",
"name": "a-gpu",
}
self.flags(
- group="pci",
- # FIXME(gibi): make _to_device_spec_conf a general util for both
- # device spec and pci alias
- alias=self._to_device_spec_conf(
- [
- pci_alias_wrong_rc,
- pci_alias_wrong_rc_2,
- pci_alias_asking_for_missing_trait,
- pci_alias_gpu,
- ]
- ),
- )
-
- # try to boot with each alias that does not match
- for alias in [
- "a-gpu-wrong-rc",
- "a-gpu-wrong-rc-2",
- "a-gpu-missing-trait",
- ]:
- extra_spec = {"pci_passthrough:alias": f"{alias}:1"}
- flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(
- flavor_id=flavor_id, networks=[], expected_state='ERROR')
- self.assertIn('fault', server)
- self.assertIn('No valid host', server['fault']['message'])
-
- self.assertPCIDeviceCounts("compute1", total=1, free=1)
- self.assert_placement_pci_view(
- "compute1", **compute1_expected_placement_view)
-
- # then boot with the matching alias
+ group="pci", alias=self._to_list_of_json_str([pci_alias_gpu])
+ )
+
extra_spec = {"pci_passthrough:alias": "a-gpu:1"}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(
- flavor_id=flavor_id, networks=[])
+ server = self._create_server(flavor_id=flavor_id, networks=[])
self.assertPCIDeviceCounts("compute1", total=1, free=0)
- compute1_expected_placement_view[
- "usages"]["0000:81:00.0"]["CUSTOM_GPU"] = 1
- compute1_expected_placement_view["allocations"][server["id"]] = {
+ self.compute1_expected_placement_view["usages"]["0000:81:00.0"][
+ "CUSTOM_GPU"
+ ] = 1
+ self.compute1_expected_placement_view["allocations"][server["id"]] = {
"0000:81:00.0": {"CUSTOM_GPU": 1}
}
self.assert_placement_pci_view(
- "compute1", **compute1_expected_placement_view)
+ "compute1", **self.compute1_expected_placement_view
+ )
self.assert_no_pci_healing("compute1")
+
+class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_device_claim_consistent_with_placement_allocation(self):
- """As soon as [scheduler]pci_in_placement is enabled the nova-scheduler
- will allocate PCI devices in placement. Then on the nova-compute side
- the PCI claim will also allocate PCI devices in the nova DB. This test
- will create a situation where the two allocation could contradict and
- observes that in a contradicting situation the PCI claim will fail
- instead of allocating a device that is not allocated in placement.
+ """As soon as [filter_scheduler]pci_in_placement is enabled the
+ nova-scheduler will allocate PCI devices in placement. Then on the
+ nova-compute side the PCI claim will also allocate PCI devices in the
+ nova DB. This test will create a situation where the two allocation
+ could contradict and observes that in a contradicting situation the PCI
+ claim will fail instead of allocating a device that is not allocated in
+ placement.
For the contradiction to happen we need two PCI devices that looks
different from placement perspective than from the nova DB perspective.
@@ -1752,7 +1754,7 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
num_pci=1, num_pfs=1, num_vfs=0)
# we allow both device to be consumed, but we assign different traits
# so we can selectively schedule to one of the devices in placement
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.0",
@@ -1805,7 +1807,7 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
}
self.flags(
group="pci",
- alias=self._to_device_spec_conf([pci_alias_no_match]),
+ alias=self._to_list_of_json_str([pci_alias_no_match]),
)
# then try to boot with the alias and expect no valid host error
@@ -1825,7 +1827,7 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=2, num_vfs=4)
# make all 4 VFs available
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"product_id": fakelibvirt.VF_PROD_ID,
@@ -1867,9 +1869,7 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
}
self.flags(
group="pci",
- # FIXME(gibi): make _to_device_spec_conf a general util for both
- # device spec and pci alias
- alias=self._to_device_spec_conf([pci_alias_vf]),
+ alias=self._to_list_of_json_str([pci_alias_vf]),
)
# reserve VFs from 81.01 in placement to drive the first instance to
@@ -1919,3 +1919,79 @@ class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
self.assert_placement_pci_view(
"compute1", **compute1_expected_placement_view)
self.assert_no_pci_healing("compute1")
+
+ def test_3vfs_asymmetric_split_between_pfs(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # Boot an instance requesting three VFs. The 3 VFs can be split between
+ # the two PFs two ways: 2 from 81.00 and 1 from 81.01, or 1 from 81.00
+ # and 2 from 81.01.
+ # Let's block the first way in placement by reserving 1 device from
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:00.0", "CUSTOM_MY_VF", 1)
+ extra_spec = {"pci_passthrough:alias": "a-vf:3"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # We expect this to fit.
+ server_3vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ compute1_expected_placement_view["allocations"][server_3vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index dd89c063dc..6b8b254af9 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -284,8 +284,8 @@ class _PCIServersTestBase(base.ServersTestBase):
self.assert_placement_pci_allocations_on_host(hostname, allocations)
@staticmethod
- def _to_device_spec_conf(spec_list):
- return [jsonutils.dumps(x) for x in spec_list]
+ def _to_list_of_json_str(list):
+ return [jsonutils.dumps(x) for x in list]
@staticmethod
def _move_allocation(allocations, from_uuid, to_uuid):
@@ -1952,19 +1952,12 @@ class PCIServersTest(_PCIServersTestBase):
def setUp(self):
super().setUp()
self.flags(group="pci", report_in_placement=True)
- # TODO(gibi): replace this with setting the [scheduler]pci_prefilter
- # confing to True once that config is added
- self.mock_pci_in_placement_enabled = self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.request_spec.RequestSpec.'
- '_pci_in_placement_enabled',
- return_value=True
- )
- ).mock
+ self.flags(group='filter_scheduler', pci_in_placement=True)
def test_create_server_with_pci_dev_and_numa(self):
"""Verifies that an instance can be booted with cpu pinning and with an
- assigned pci device.
+ assigned pci device with legacy policy and numa info for the pci
+ device.
"""
self.flags(cpu_dedicated_set='0-7', group='compute')
@@ -1999,9 +1992,9 @@ class PCIServersTest(_PCIServersTestBase):
def test_create_server_with_pci_dev_and_numa_fails(self):
"""This test ensures that it is not possible to allocated CPU and
- memory resources from one NUMA node and a PCI device from another.
+ memory resources from one NUMA node and a PCI device from another
+ if we use the legacy policy and the pci device reports numa info.
"""
-
self.flags(cpu_dedicated_set='0-7', group='compute')
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
@@ -2356,6 +2349,133 @@ class PCIServersTest(_PCIServersTestBase):
self.assert_no_pci_healing("test_compute0")
self.assert_no_pci_healing("test_compute1")
+ def test_same_host_resize_with_pci(self):
+ """Start a single compute with 3 PCI devs and resize and instance
+ from one dev to two devs
+ """
+ self.flags(allow_resize_to_same_host=True)
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # Boot a server with a single PCI device.
+ # To stabilize the test we reserve 81.01 and 81.02 in placement so
+ # we can be sure that the instance will use 81.00; otherwise the
+ # allocation would land randomly on 00, 01, or 02.
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 1)
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # remove the reservations, so we can resize on the same host and
+ # consume 01 and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 0)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 0)
+
+ # Resize the server to use 2 PCI devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=0)
+ # the source host side of the allocation is now held by the migration
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server['id'])
+ # but we have the dest host side of the allocations on the same host
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # revert the resize so the instance goes back to using a single
+ # device
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ # the migration allocation is moved back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ # and the "dest" side of the allocation is dropped
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize again but now confirm the same host resize and assert that
+ # only the new flavor usage remains
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {self.PCI_RC: 1}
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_no_pci_healing("test_compute0")
+
def _confirm_resize(self, server, host='host1'):
# NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
# host option to know the source hostname but given we have a global
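The same-host resize test above leans on how placement tracks a resize: while the migration is in flight the source-side PCI usage is held under the migration UUID and the destination-side usage under the instance UUID; a revert swaps them back, while a confirm drops the migration-held side. A simplified illustration of that bookkeeping on the expected-view dict (this is not the real _move_server_allocation helper, only the idea behind it):

    def move_allocation(allocations, instance_uuid, migration_uuid,
                        revert=False):
        # On resize the source-host allocation moves from the instance key to
        # the migration key; on revert it moves back.
        src, dst = ((migration_uuid, instance_uuid) if revert
                    else (instance_uuid, migration_uuid))
        allocations[dst] = allocations.pop(src)

    allocations = {
        "<instance uuid>": {"0000:81:00.0": {"<PCI resource class>": 1}},
    }
    move_allocation(allocations, "<instance uuid>", "<migration uuid>")
    # The one-device allocation is now keyed by the migration UUID, leaving
    # the instance key free for the two-device destination allocation.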
@@ -2561,7 +2681,7 @@ class PCIServersTest(_PCIServersTestBase):
self.assertIn('fault', server)
self.assertIn('No valid host', server['fault']['message'])
- def _create_two_computes_and_an_instance_on_the_first(self):
+ def _create_two_computes(self):
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
@@ -2589,6 +2709,17 @@ class PCIServersTest(_PCIServersTestBase):
self.assert_placement_pci_view(
"test_compute1", **test_compute1_placement_pci_view)
+ return (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def _create_two_computes_and_an_instance_on_the_first(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
# boot a VM on test_compute0 with a single PCI dev
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
@@ -2695,6 +2826,175 @@ class PCIServersTest(_PCIServersTestBase):
self.assert_no_pci_healing("test_compute0")
self.assert_no_pci_healing("test_compute1")
+ def test_unshelve_after_offload(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # shelve offload the server
+ self._shelve_server(server)
+
+ # source allocation should be freed
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should not be touched
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # disable test_compute0 and unshelve the instance
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"status": "disabled"},
+ )
+ self._unshelve_server(server)
+
+ # test_compute0 should be unchanged
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should be allocated
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_reschedule(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # try to boot a VM with a single device but inject fault on the first
+ # compute so that the VM is re-scheduled to the other
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+
+ calls = []
+ orig_guest_create = (
+ nova.virt.libvirt.driver.LibvirtDriver._create_guest)
+
+ def fake_guest_create(*args, **kwargs):
+ if not calls:
+ calls.append(1)
+ raise fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ )
+ else:
+ return orig_guest_create(*args, **kwargs)
+
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._create_guest',
+ new=fake_guest_create
+ ):
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none')
+
+ compute_pci_view_map = {
+ 'test_compute0': test_compute0_placement_pci_view,
+ 'test_compute1': test_compute1_placement_pci_view,
+ }
+ allocated_compute = server['OS-EXT-SRV-ATTR:host']
+ not_allocated_compute = (
+ "test_compute0"
+ if allocated_compute == "test_compute1"
+ else "test_compute1"
+ )
+
+ allocated_pci_view = compute_pci_view_map.pop(
+ server['OS-EXT-SRV-ATTR:host'])
+ not_allocated_pci_view = list(compute_pci_view_map.values())[0]
+
+ self.assertPCIDeviceCounts(allocated_compute, total=1, free=0)
+ allocated_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ allocated_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(allocated_compute, **allocated_pci_view)
+
+ self.assertPCIDeviceCounts(not_allocated_compute, total=1, free=1)
+ self.assert_placement_pci_view(
+ not_allocated_compute, **not_allocated_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_multi_create(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ body = self._build_server(flavor_id=pci_flavor_id, networks='none')
+ body.update(
+ {
+ "min_count": "2",
+ }
+ )
+ self.api.post_server({'server': body})
+
+ servers = self.api.get_servers(detail=False)
+ for server in servers:
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ self.assertEqual(2, len(servers))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ # we have no way to influence which instance takes which device, so
+ # we need to look at the nova DB to properly assert the placement
+ # allocation
+ devices = objects.PciDeviceList.get_by_compute_node(
+ self.ctxt,
+ objects.ComputeNode.get_by_nodename(self.ctxt, 'test_compute0').id,
+ )
+ for dev in devices:
+ if dev.instance_uuid:
+ test_compute0_placement_pci_view["usages"][
+ dev.address][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ dev.instance_uuid] = {dev.address: {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
@@ -2719,15 +3019,7 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
def setUp(self):
super().setUp()
self.flags(group="pci", report_in_placement=True)
- # TODO(gibi): replace this with setting the [scheduler]pci_in_placement
- # confing to True once that config is added
- self.mock_pci_in_placement_enabled = self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.request_spec.RequestSpec.'
- '_pci_in_placement_enabled',
- return_value=True
- )
- ).mock
+ self.flags(group='filter_scheduler', pci_in_placement=True)
def test_create_server_with_pci_dev_and_numa(self):
"""Validate behavior of 'preferred' PCI NUMA policy.
@@ -2813,7 +3105,7 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
# the device_spec will assign different traits to 81.00 than 81.01
# so the two devices become different from placement perspective
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
@@ -2870,9 +3162,7 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
}
self.flags(
group="pci",
- # FIXME(gibi): make _to_device_spec_conf a general util for both
- # device spec and pci alias
- alias=self._to_device_spec_conf([pci_alias]),
+ alias=self._to_list_of_json_str([pci_alias]),
)
# Ask for dedicated CPUs, that can only be fulfilled on numa 1.
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index d1cad0e376..1200f80357 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -12,9 +12,11 @@
# under the License.
import fixtures
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.tests import fixtures as nova_fixtures
@@ -99,7 +101,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
- compute = self._start_compute(host=hostname)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ compute = self._start_compute(host=hostname)
# Ensure populating the existing pmems correctly.
vpmems = compute.driver._vpmems_by_name
diff --git a/nova/tests/functional/notification_sample_tests/test_compute_task.py b/nova/tests/functional/notification_sample_tests/test_compute_task.py
index 3de1c7d4e1..05d2d32fde 100644
--- a/nova/tests/functional/notification_sample_tests/test_compute_task.py
+++ b/nova/tests/functional/notification_sample_tests/test_compute_task.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova import objects
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
@@ -53,6 +56,10 @@ class TestComputeTaskNotificationSample(
},
actual=self.notifier.versioned_notifications[1])
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index f671a8abca..5a52c2dad6 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -46,18 +46,18 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.compute2 = self.start_service('compute', host='host2')
actions = [
- self._test_live_migration_rollback,
- self._test_live_migration_abort,
- self._test_live_migration_success,
- self._test_evacuate_server,
- self._test_live_migration_force_complete
+ (self._test_live_migration_rollback, 'ACTIVE'),
+ (self._test_live_migration_abort, 'ACTIVE'),
+ (self._test_live_migration_success, 'ACTIVE'),
+ (self._test_evacuate_server, 'SHUTOFF'),
+ (self._test_live_migration_force_complete, 'ACTIVE'),
]
- for action in actions:
+ for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
- self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@@ -275,6 +275,12 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
+ # In this scenario an evacuation happened earlier, which left the
+ # server stopped.
+ self._start_server(server)
+ self._wait_for_state_change(server, 'ACTIVE')
+ self.notifier.reset()
+
post = {
'os-migrateLive': {
'host': 'host2',
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 6180dbfbaa..b20e1530cc 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -59,7 +59,8 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Now try to evacuate the server back to the original source compute.
server = self._evacuate_server(
server, {'onSharedStorage': 'False'},
- expected_host=self.compute.host, expected_migration_status='done')
+ expected_host=self.compute.host, expected_migration_status='done',
+ expected_state='ACTIVE')
# Assert the RequestSpec.ignore_hosts field is not populated.
reqspec = objects.RequestSpec.get_by_instance_uuid(
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 9a6a79d7a2..8088ccfe06 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -13,9 +13,11 @@
# limitations under the License.
import time
+from unittest import mock
from oslo_log import log as logging
+from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -81,6 +83,10 @@ class FailedEvacuateStateTests(test.TestCase,
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(created_server, 'ACTIVE')
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_evacuate_no_valid_host(self):
# Boot a server
server = self._boot_a_server()
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index aa86770584..59bbed4f46 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -95,7 +95,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Evacuate the instance from the source_host
server = self._evacuate_server(
- server, expected_migration_status='done')
+ server, expected_migration_status='done',
+ expected_state='ACTIVE')
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index 5e69905f5f..af134070cd 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -66,4 +66,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# higher than host3.
self._evacuate_server(
server, {'onSharedStorage': 'False'}, expected_host='host3',
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index dc74791e0e..3cfece8d36 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -216,7 +216,7 @@ class TestEvacuateResourceTrackerRace(
self._run_periodics()
self._wait_for_server_parameter(
- server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
+ server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'SHUTOFF'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1922053.py b/nova/tests/functional/regressions/test_bug_1922053.py
index 612be27b2b..70bb3d4cab 100644
--- a/nova/tests/functional/regressions/test_bug_1922053.py
+++ b/nova/tests/functional/regressions/test_bug_1922053.py
@@ -1,3 +1,4 @@
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -27,6 +28,7 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
ADMIN_API = True
microversion = 'latest'
+ expected_state = 'SHUTOFF'
def _create_test_server(self, compute_host):
return self._create_server(host=compute_host, networks='none')
@@ -59,7 +61,8 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
server = self._evacuate_server(
server,
expected_host='compute2',
- expected_migration_status='done'
+ expected_migration_status='done',
+ expected_state=self.expected_state
)
# Assert that the request to force up the host is rejected
@@ -97,6 +100,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""
microversion = '2.52'
+ expected_state = 'ACTIVE'
def _create_test_server(self, compute_host):
return self._create_server(az='nova:compute', networks='none')
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 7cbe8bdb67..01e3547f7e 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -444,7 +444,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
evacuated_server = self._evacuate_server(
servers[1], {'onSharedStorage': 'False'},
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -621,7 +622,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
compute3 = self.start_service('compute', host='host3')
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -800,7 +802,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@@ -870,6 +873,54 @@ class ServerGroupTestV264(ServerGroupTestV215):
self.assertEqual(2, hosts.count(host))
+class ServerGroupTestV295(ServerGroupTestV264):
+ microversion = '2.95'
+
+ def _evacuate_with_soft_anti_affinity_policies(self, group):
+ created_group = self.api.post_server_groups(group)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # Note(gibi): need to get the server again as the state of the instance
+ # goes to ACTIVE first then the host of the instance changes to the
+ # new host later
+ evacuated_server = self.admin_api.get_server(evacuated_server['id'])
+
+ return [evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host']]
+
+ def test_evacuate_with_anti_affinity(self):
+ created_group = self.api.post_server_groups(self.anti_affinity)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ # Start additional host to test evacuation
+ compute3 = self.start_service('compute', host='host3')
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # check that the server is evacuated
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # check that policy is kept
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host'])
+
+ compute3.kill()
+
+
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
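The new test classes hinge on the hw_rescue_device and hw_rescue_bus image properties: with microversion 2.87 a volume-backed server can only be rescued when the rescue image declares a stable rescue device. A minimal sketch of the property set the fixture image carries (values taken from the test above; other device/bus values may exist but are not shown):

    # Image properties that mark an image as usable for stable-device rescue.
    stable_rescue_properties = {
        "hw_rescue_device": "disk",   # attach the rescue image as a disk
        "hw_rescue_bus": "scsi",      # on the SCSI bus
    }

In the tests these end up in image['properties'] via _create_image(metadata=...), so individual test classes can add or omit them.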
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index d1ab84aa7b..43208aa812 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -2260,7 +2260,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
}
server = self._evacuate_server(
- server, extra_post_args=post, expected_host=dest_hostname)
+ server, extra_post_args=post, expected_host=dest_hostname,
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2437,7 +2438,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
# stay ACTIVE and task_state will be set to None.
server = self._evacuate_server(
server, expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -5324,7 +5326,8 @@ class ServerMovingTestsWithNestedResourceRequests(
server = self._evacuate_server(
server, extra_post_args=post, expected_migration_status='error',
- expected_host=source_hostname)
+ expected_host=source_hostname,
+ expected_state='ACTIVE')
self.assertIn('Unable to move instance %s to host host2. The instance '
'has complex allocations on the source host so move '
@@ -5530,7 +5533,8 @@ class ServerMovingTestsFromFlatToNested(
self._evacuate_server(
server, extra_post_args=post, expected_host='host1',
- expected_migration_status='error')
+ expected_migration_status='error',
+ expected_state='ACTIVE')
# We expect that the evacuation will fail as force evacuate tries to
# blindly copy the source allocation to the destination but on the
diff --git a/nova/tests/functional/test_servers_resource_request.py b/nova/tests/functional/test_servers_resource_request.py
index f881fc0323..9c91af7218 100644
--- a/nova/tests/functional/test_servers_resource_request.py
+++ b/nova/tests/functional/test_servers_resource_request.py
@@ -2162,7 +2162,8 @@ class ServerMoveWithPortResourceRequestTest(
# simply fail and the server remains on the source host
server = self._evacuate_server(
server, expected_host='host1', expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state="ACTIVE")
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
@@ -2979,6 +2980,7 @@ class ExtendedResourceRequestOldCompute(
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
+ self.api.microversion = '2.72'
@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+ # Simulate the get_local_node_uuid behavior of calling write once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Baseline test case to show that starting and re-starting without
+ # hitting any error case works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
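The startup-check tests above stub the nova.virt.node helpers instead of touching the filesystem. Conceptually those helpers persist the node's UUID in a small state file so a restarted compute service can prove it is the same node as before; losing or changing that identity is treated as a hard configuration error. The sketch below shows the idea only; the file name and path are assumptions for illustration, not taken from this patch:

    import os
    import uuid

    STATE_DIR = "/var/lib/nova"                              # assumed path
    COMPUTE_ID_FILE = os.path.join(STATE_DIR, "compute_id")  # assumed name

    def read_local_node_uuid():
        # Return the previously persisted UUID, or None on first start.
        try:
            with open(COMPUTE_ID_FILE) as f:
                return f.read().strip() or None
        except FileNotFoundError:
            return None

    def get_local_node_uuid():
        # Reuse the persisted identity; generate and persist one only once.
        node_uuid = read_local_node_uuid()
        if node_uuid is None:
            node_uuid = str(uuid.uuid4())
            with open(COMPUTE_ID_FILE, "w") as f:
                f.write(node_uuid)
        return node_uuid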
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index fb7f7662d8..bd88bb8d6e 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -416,3 +416,32 @@ class EvacuateTestV268(EvacuateTestV229):
def test_forced_evacuate_with_no_host_provided(self):
# not applicable for v2.68, which removed the 'force' parameter
pass
+
+
+class EvacuateTestV295(EvacuateTestV268):
+ def setUp(self):
+ super(EvacuateTestV268, self).setUp()
+ self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
+ version='2.95')
+ self.req = fakes.HTTPRequest.blank('', version='2.95')
+ self.mock_get_min_ver = self.useFixture(fixtures.MockPatch(
+ 'nova.objects.service.get_minimum_version_all_cells',
+ return_value=62)).mock
+
+ def test_evacuate_version_error(self):
+ self.mock_get_min_ver.return_value = 61
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._get_evacuate_response,
+ {'host': 'my-host', 'adminPass': 'foo'})
+
+ def test_evacuate_unsupported_rpc(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.UnsupportedRPCVersion(
+ api="fakeapi",
+ required="x.xx")
+
+ self.stub_out('nova.compute.api.API.evacuate', fake_evacuate)
+ self._check_evacuate_failure(webob.exc.HTTPConflict,
+ {'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 0581a47c84..ea9ca2f632 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index bd09307567..961f4a02c9 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -104,6 +104,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index 636682a6b7..9d99c3ae6d 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -87,7 +87,8 @@ class ServerGroupTestV21(test.NoDBTestCase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
- self.req = fakes.HTTPRequest.blank('')
+ self.member_req = fakes.HTTPRequest.member_req('')
+ self.reader_req = fakes.HTTPRequest.reader_req('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -114,20 +115,20 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
- req = fakes.HTTPRequest.blank('', version='2.63')
+ req = fakes.HTTPRequest.member_req('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
@@ -162,7 +163,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
- self.controller.create(self.req, body={'server_group': sgroup})
+ self.controller.create(self.member_req, body={'server_group': sgroup})
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
@@ -289,7 +290,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ reader_req = fakes.HTTPRequest.reader_req(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
@@ -298,7 +299,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertEqual(all, res_dict)
# test as non-admin
- res_dict = self.controller.index(req)
+ res_dict = self.controller.index(reader_req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@@ -347,25 +348,27 @@ class ServerGroupTestV21(test.NoDBTestCase):
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ req = fakes.HTTPRequest.reader_req(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, self.req, uuidsentinel.group)
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ self.controller.show, self.reader_req, uuidsentinel.group)
def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
+ ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID,
+ roles=['member', 'reader'])
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
@@ -379,7 +382,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
@@ -393,7 +396,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
- self.controller.show(self.req, ig_uuid)
+ self.controller.show(self.reader_req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
@@ -406,7 +409,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
@@ -414,99 +417,99 @@ class ServerGroupTestV21(test.NoDBTestCase):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=None)
+ self.controller.create, self.member_req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=body)
+ self.controller.create, self.member_req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
@@ -528,7 +531,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.index(self.admin_req)
# test as non-admin
- self.controller.index(self.req)
+ self.controller.index(self.reader_req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
@@ -598,7 +601,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
- resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
+ resp = self.controller.delete(self.member_req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
@@ -611,7 +614,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- self.req, 'invalid')
+ self.member_req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
@@ -622,7 +625,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
- self.controller.delete(self.req, ig_uuid)
+ self.controller.delete(self.member_req, ig_uuid)
class ServerGroupTestV213(ServerGroupTestV21):
@@ -649,7 +652,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
@@ -674,7 +677,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@@ -690,7 +693,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
@@ -698,7 +701,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertIn("Only anti-affinity policy supports rules", str(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# A negative test for key is unknown, the value is not positive
# and not integer
invalid_rules = [{'unknown_key': '3'},
@@ -718,7 +721,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
@@ -734,7 +737,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
@@ -742,14 +745,14 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
@@ -771,7 +774,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_additional_params(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
@@ -786,7 +789,7 @@ class ServerGroupTestV275(ServerGroupTestV264):
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
- req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
- version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('/os-server-groups?dummy=False',
+ version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 8cf90ddebe..9ac970f787 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -240,6 +240,9 @@ class HTTPRequest(os_wsgi.Request):
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
+ roles = kwargs.pop('roles', [])
+ if use_admin_context:
+ roles.append('admin')
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
@@ -247,10 +250,19 @@ class HTTPRequest(os_wsgi.Request):
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
- is_admin=use_admin_context)
+ is_admin=use_admin_context,
+ roles=roles)
out.api_version_request = api_version.APIVersionRequest(version)
return out
+ @classmethod
+ def member_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['member', 'reader'], **kwargs)
+
+ @classmethod
+ def reader_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['reader'], **kwargs)
+
class HTTPRequestV21(HTTPRequest):
pass
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index df51665959..29dd5610f6 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -128,20 +128,21 @@ class TestPolicyCheck(test.NoDBTestCase):
self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
- self._check_filter_rules()
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context)
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
@@ -150,11 +151,13 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- self._check_filter_rules(target=instance)
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context, target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
- project_id='fake-project')
+ project_id='fake-project',
+ roles=['reader'])
instance = fake_instance.fake_instance_obj(db_context)
rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index f17a767b99..9d6e9ba4bd 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -4081,7 +4081,8 @@ class _ComputeAPIUnitTestMixIn(object):
injected_files=[], bdms=bdms,
preserve_ephemeral=False, host=None,
request_spec=fake_spec,
- reimage_boot_volume=True)
+ reimage_boot_volume=True,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
_checks_for_create_and_rebuild.assert_called_once_with(
@@ -4096,7 +4097,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance,
uuids.image_ref,
admin_pass,
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
@mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@@ -4155,7 +4157,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance,
uuids.image_ref,
admin_pass,
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@@ -4205,7 +4208,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4278,7 +4282,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4346,7 +4351,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4405,7 +4411,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4469,7 +4476,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -5794,7 +5802,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5846,6 +5857,7 @@ class _ComputeAPIUnitTestMixIn(object):
# Assert that the instance task state as set in the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5854,7 +5866,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5862,6 +5875,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
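+        # Give the instance a rescue-capable image so that the failure
+        # below is attributable to the missing COMPUTE_RESCUE_BFV trait
+        # rather than to missing hw_rescue_* image properties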
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5899,6 +5918,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+        # Assert that the instance task state was set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+            # Assert that any attempt to rescue a bfv instance with a
+            # rescue image that does not define the hw_rescue_device and
+            # hw_rescue_bus properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 8eeb5f8f58..49cf15ec17 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -2731,7 +2731,7 @@ class ComputeTestCase(BaseTestCase,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2762,7 +2762,7 @@ class ComputeTestCase(BaseTestCase,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2815,7 +2815,7 @@ class ComputeTestCase(BaseTestCase,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
on_shared_storage=False, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2834,7 +2834,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[], reimage_boot_volume=False)
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2855,7 +2856,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[], reimage_boot_volume=False)
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2889,7 +2890,7 @@ class ComputeTestCase(BaseTestCase,
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -4617,7 +4618,8 @@ class ComputeTestCase(BaseTestCase,
'request_spec': None,
'on_shared_storage': False,
'accel_uuids': (),
- 'reimage_boot_volume': False}),
+ 'reimage_boot_volume': False,
+ 'target_state': None}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5136,7 +5138,7 @@ class ComputeTestCase(BaseTestCase,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
inst_ref.refresh()
@@ -11961,7 +11963,7 @@ class ComputeAPITestCase(BaseTestCase):
force=False)
@mock.patch('nova.compute.utils.notify_about_instance_action')
- def _test_evacuate(self, mock_notify, force=None):
+ def _test_evacuate(self, mock_notify, force=None, target_state=None):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
@@ -11998,7 +12000,8 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host',
on_shared_storage=True,
admin_password=None,
- force=force)
+ force=force,
+ target_state=target_state)
if force is False:
host = None
else:
@@ -12015,7 +12018,8 @@ class ComputeAPITestCase(BaseTestCase):
recreate=True,
on_shared_storage=True,
request_spec=fake_spec,
- host=host)
+ host=host,
+ target_state=target_state)
do_test()
instance.refresh()
@@ -12047,6 +12051,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_evacuate_with_forced_host(self):
self._test_evacuate(force=True)
+ def test_evacuate_with_target_state(self):
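+        # The requested target_state should be passed through unchanged to
+        # the evacuate/rebuild call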
+ self._test_evacuate(target_state="stopped")
+
@mock.patch('nova.servicegroup.api.API.service_is_up',
return_value=False)
def test_fail_evacuate_with_non_existing_destination(self, _service_is_up):
@@ -13540,7 +13547,8 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[], reimage_boot_volume=False)
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 21dc6860ae..16de724a42 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -57,6 +57,7 @@ from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
@@ -76,6 +77,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.virt import node as virt_node
from nova.volume import cinder
@@ -89,6 +91,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
# os-brick>=5.1 now uses external file system locks instead of internal
# locks so we need to set up locking
REQUIRES_LOCKING = True
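+    # Do not stub the local compute id for these tests; they exercise the
+    # node identity startup behavior directly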
+ STUB_COMPUTE_ID = False
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
@@ -906,6 +909,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
+ @mock.patch.object(manager.ComputeManager,
+ '_ensure_existing_node_identity')
@mock.patch.object(manager.ComputeManager, '_get_nodes')
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@@ -924,17 +929,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm, mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_init_host,
- mock_error_interrupted, mock_get_nodes):
+ mock_error_interrupted, mock_get_nodes,
+ mock_existing_node):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
+ mock_existing_node.assert_not_called()
mock_validate_pinning.assert_called_once_with(inst_list)
mock_validate_vtpm.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
@@ -977,8 +984,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"""
mock_get_nodes.return_value = {
uuids.cn_uuid1: objects.ComputeNode(
- uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
- self.compute.init_host()
+ uuid=uuids.cn_uuid1, hypervisor_hostname='node1',
+ host=self.compute.host)}
+ self.compute.init_host(None)
mock_error_interrupted.assert_called_once_with(
test.MatchType(nova.context.RequestContext), set(),
@@ -988,16 +996,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
- def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
+ def test_cleanup_host(self, mock_cnlist_get, mock_miglist_get,
+ mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
+ mock_cnlist_get.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
+ self.compute.init_host(None)
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
@@ -1086,7 +1097,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'remove_provider_tree_from_instance_allocation')
) as (mock_get_net, mock_remove_allocation):
- self.compute.init_host()
+ self.compute.init_host(None)
mock_remove_allocation.assert_called_once_with(
self.context, deleted_instance.uuid, uuids.our_node_uuid)
@@ -1139,11 +1150,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuids.evac_instance: evacuating_instance
}
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
mock_init_instance.assert_called_once_with(
self.context, active_instance)
@@ -1151,23 +1162,49 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {active_instance.uuid, evacuating_instance.uuid},
mock_get_nodes.return_value.keys())
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
- def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn1 = objects.ComputeNode(uuid=uuids.cn1)
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [cn1, cn2]
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
+ # NOTE(danms): The fake driver, by default, uses
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
+ # return here.
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
+ mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn1: cn1, uuids.cn2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_by_uuid.assert_called_once_with(self.context,
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # Possible hostname (as reported by the virt driver) rename,
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
@@ -1189,37 +1226,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
- self, mock_driver_get_nodes, mock_get_by_host_and_node,
+ self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [
- exception.ComputeHostNotFound(host='fake-node1'), cn2]
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
+ mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn2: cn2}, nodes)
+ self.assertEqual({}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_all_by_uuids.assert_called_once_with(self.context,
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
- "Compute node %s not found in the database. If this is the first "
- "time this service is starting on this host, then you can ignore "
- "this warning.", 'fake-node1')
+ "Compute nodes %s for host %s were not found in the database. "
+ "If this is the first time this service is starting on this host, "
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(self,
@@ -1230,13 +1265,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_pinning.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(self,
@@ -1247,7 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -5145,7 +5182,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
group='pci'
)
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
@@ -5335,7 +5372,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [], False)
+ recreate, False, False, None, scheduled_node, {}, None, [], False,
+ None)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5447,7 +5485,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
limits={}, request_spec=request_spec, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5487,7 +5526,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
request_spec=request_spec, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5513,7 +5552,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [], False)
+ False, False, migration, None, {}, None, [], False,
+ None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5535,7 +5575,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [], False)
+ None, [], False, None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5618,7 +5658,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
preserve_ephemeral, {}, {},
self.allocations,
mock.sentinel.mapping, [],
- False)
+ False, None)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5877,7 +5917,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[], reimage_boot_volume=False)
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -6321,6 +6361,171 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual({'one-image': 'cached',
'two-image': 'existing'}, r)
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
+ # Make sure an up-to-date service bypasses the persistence
+ service_ref = service_obj.Service()
+ self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
+ # Make sure an old service for ironic does not write a local node uuid
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_preprovisioned(self,
+ mock_read_node,
+ mock_write_node):
+ # Make sure an old service does not write a uuid if one is present
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = str(uuids.SOME_UUID)
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_no_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find no nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = []
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_multi_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find multiple nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [1, 2]
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_writes_node_uuid(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, there is no pre-provisioned local
+ # compute node uuid, and we find exactly one compute node in the
+ # database for our host, we persist that.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [
+ objects.ComputeNode(uuid=str(uuids.compute)),
+ ]
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_called_once_with(str(uuids.compute))
+
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
+ def test_ensure_node_uuid_called_by_init_host(self):
+ # test_init_host() above ensures that we do not call
+ # _ensure_existing_node_identity() in the service_ref=None case.
+ # Since testing init_host() requires a billion mocks, this
+        # tests that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_ensure_existing_node_identity') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host,
+ mock.sentinel.service_ref)
+ m.assert_called_once_with(mock.sentinel.service_ref)
+
+ def test_check_for_host_rename_ironic(self):
+ self.flags(compute_driver='ironic')
+        # The ironic driver takes the early exit here, so a node record
+        # whose host does not match ours is not treated as a rename
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.compute._check_for_host_rename(nodes)
+
+ def test_check_for_host_rename_renamed_only(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_renamed_one(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host),
+ uuids.node2: mock.MagicMock(uuid=uuids.node2,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_not_renamed(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host)}
+ with mock.patch.object(manager.LOG, 'debug') as mock_debug:
+ self.compute._check_for_host_rename(nodes)
+ mock_debug.assert_called_once_with(
+ 'Verified node %s matches my host %s',
+ uuids.node1, self.compute.host)
+
+ @mock.patch('nova.compute.manager.ComputeManager._get_nodes')
+ def test_check_for_host_rename_called_by_init_host(self, mock_nodes):
+ # Since testing init_host() requires a billion mocks, this
+        # tests that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_check_for_host_rename') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host, None)
+ m.assert_called_once_with(mock_nodes.return_value)
+
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index 4d8ed9848b..cd36b8987f 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -64,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
+ 'uuid': uuids.cn1,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
+ deleted=False,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -586,7 +588,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
@@ -619,7 +621,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
@@ -643,8 +645,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'flavor',
'migration_context',
'resources'])
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
- _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
@@ -671,7 +672,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -730,7 +731,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
@@ -747,7 +748,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
@@ -772,7 +773,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
@@ -797,7 +798,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
@@ -823,7 +824,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -863,7 +864,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
@@ -889,7 +890,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -926,7 +927,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -952,7 +953,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -987,7 +988,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1013,7 +1014,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1056,7 +1057,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
@@ -1084,7 +1085,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1121,7 +1122,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1147,7 +1148,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1199,7 +1200,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1240,7 +1241,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1273,7 +1274,7 @@ class TestInitComputeNode(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
@@ -1296,14 +1297,14 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
- def fake_get_node(_ctx, host, node):
+ def fake_get_node(_ctx, uuid):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
@@ -1313,85 +1314,67 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.cn1)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
- pci_mock, get_by_hypervisor_mock):
+ pci_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
- def fake_get_all(_ctx, nodename):
- return [cn]
+ def fake_get_node(_ctx, uuid):
+ return cn
- get_mock.side_effect = exc.NotFound
- get_by_hypervisor_mock.side_effect = fake_get_all
+ get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
- self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock)
+ create_mock):
+ self._test_compute_node_created(update_mock, get_mock, create_mock)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@@ -1452,13 +1435,9 @@ class TestInitComputeNode(BaseTestCase):
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- if rebalances_nodes:
- get_by_hypervisor_mock.assert_called_once_with(
- mock.sentinel.ctx, _NODENAME)
- else:
- get_by_hypervisor_mock.assert_not_called()
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.compute_node_uuid)
+
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
@@ -1466,7 +1445,7 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
@@ -1489,14 +1468,14 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
- ctxt, _HOSTNAME, _NODENAME)] * 2)
+ ctxt, uuids.cn_uuid)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
@@ -1512,6 +1491,81 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'fake-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Host is the same, no _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_not_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node_move_host(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'old-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Our host changed, so we should have the updated value and have
+ # called _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
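The undelete and duplicate-create tests added in the hunk above pin down how _init_compute_node() is expected to revive a soft-deleted compute node record. A rough, illustrative sketch of that flow (not the actual resource tracker code; the helper names and the host comparison are inferred from the mocks) is:

def _init_compute_node_sketch(rt, context, resources):
    # Look the node up by the uuid reported by the virt driver.
    node = rt._get_compute_node(context, resources['uuid'])
    if node is not None:
        if node.deleted:
            # Revive the soft-deleted record instead of creating a duplicate.
            node.deleted = False
            node.deleted_at = None
            node.save()
        if node.host != resources['hypervisor_hostname']:
            # The node moved to another host; record that and resync.
            node.host = resources['hypervisor_hostname']
            rt._update(context, node)
        return False  # an existing record was found
    # Otherwise a new ComputeNode is created; a DuplicateRecord raised by
    # ComputeNode.create() is surfaced as InvalidConfiguration.
    return True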
@@ -2635,7 +2689,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2739,7 +2793,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
return_value=_COMPUTE_NODE_FIXTURES[0])
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
return_value=[])
@@ -2911,7 +2965,7 @@ class TestResize(BaseTestCase):
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -3081,7 +3135,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -3213,7 +3267,7 @@ class TestRebuild(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 55d0fc53e8..6f78678a92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -836,7 +836,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
limits=None, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False, version='6.1')
+ reimage_boot_volume=False, target_state=None,
+ version='6.2')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -868,9 +869,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
node=None, host=None, reimage_boot_volume=False,
- **rebuild_args)
+ target_state=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2'),
+ mock.call('6.1'),
+ mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
@@ -890,7 +893,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
compute_api.router.client.return_value = mock_client
# Force can_send_version to [False, True, True], so that 6.0
# version is used.
- mock_client.can_send_version.side_effect = [False, True, True]
+ mock_client.can_send_version.side_effect = [False, False, True, True]
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
rebuild_args = {
@@ -908,12 +911,47 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'limits': None,
'accel_uuids': [],
'reimage_boot_volume': True,
+ 'target_state': None,
}
self.assertRaises(
exception.NovaException, compute_api.rebuild_instance,
ctxt, instance=self.fake_instance_obj,
node=None, host=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.1')])
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2')])
+
+ def test_rebuild_instance_evacuate_old_rpcapi(self):
+ # With rpcapi < 6.2, evacuating (i.e. passing a target_state) should
+ # raise an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to return False.
+ mock_client.can_send_version.return_value = False
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': 'stopped',
+ }
+ self.assertRaises(
+ exception.UnsupportedRPCVersion,
+ compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
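The tests above exercise the client-side version negotiation for the new target_state argument: a peer that cannot speak 6.2 must cause a hard failure when a target state is requested, while plain rebuilds still fall back to older versions. A minimal, hedged sketch of that pattern (illustrative names only, not the actual nova.compute.rpcapi code) looks like:

class UnsupportedRPCVersion(Exception):
    """Stand-in for nova.exception.UnsupportedRPCVersion in this sketch."""


def negotiate_rebuild_version(client, target_state=None):
    # Newest first: 6.2 understands target_state, 6.1 understands
    # reimage_boot_volume, 6.0 is the oldest 6.x contract.
    if client.can_send_version('6.2'):
        return '6.2'
    if target_state is not None:
        # The peer cannot honour the requested state; fail loudly rather
        # than silently dropping the argument.
        raise UnsupportedRPCVersion()
    if client.can_send_version('6.1'):
        return '6.1'
    if client.can_send_version('6.0'):
        return '6.0'
    return '5.12'  # assumption: mirrors the can_send_version() calls above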
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index c77e39ceac..dd10ecd7df 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -1597,7 +1597,7 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
provider_mapping)
self.assertEqual(
- ",".join({uuids.rp1, uuids.rp2}), req.spec[0]["rp_uuids"]
+ {uuids.rp1, uuids.rp2}, set(req.spec[0]["rp_uuids"].split(','))
)
def test_pci_request_has_no_mapping(self):
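The assertion is loosened here because joining a set produces the UUIDs in an arbitrary order, so only a set comparison of the split value is stable; for example (hypothetical values):

rp_uuids = {'rp-1111', 'rp-2222'}
joined = ",".join(rp_uuids)        # element order is unspecified for a set
assert set(joined.split(',')) == rp_uuids  # stable regardless of join order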
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index e942217a6c..971570dfb5 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -389,7 +389,8 @@ class _BaseTaskTestCase(object):
'preserve_ephemeral': False,
'host': 'compute-host',
'request_spec': None,
- 'reimage_boot_volume': False}
+ 'reimage_boot_volume': False,
+ 'target_state': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -4751,9 +4752,34 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_evacuate_old_rpc_with_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': 'stopped'})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version', return_value=False):
+ self.assertRaises(exc.UnsupportedRPCVersion,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj, **rebuild_args)
+
+ def test_evacuate_old_rpc_without_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': None})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ can_send_version.assert_has_calls([
+ mock.call('1.25'), mock.call('1.24'),
+ mock.call('1.12')])
+
def test_rebuild_instance_volume_backed(self):
inst_obj = self._create_fake_instance_obj()
- version = '1.24'
+ version = '1.25'
cctxt_mock = mock.MagicMock()
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': inst_obj.host})
@@ -4785,7 +4811,8 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
self.conductor.rebuild_instance,
self.context, inst_obj,
**rebuild_args)
- can_send_version.assert_called_once_with('1.24')
+ can_send_version.assert_has_calls([mock.call('1.25'),
+ mock.call('1.24')])
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index fc25bef2bc..639623bbb5 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -635,7 +635,9 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
# now the same url but with extra leading '/' characters removed.
if expected_cpython in errmsg:
location = result[3].decode()
- location = location.removeprefix('Location: ').rstrip('\r\n')
+ if location.startswith('Location: '):
+ location = location[len('Location: '):]
+ location = location.rstrip('\r\n')
self.assertTrue(
location.startswith('/example.com/%2F..'),
msg='Redirect location is not the expected sanitized URL',
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 7e6894a1cc..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -16,6 +16,7 @@ import copy
from unittest import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
@@ -553,17 +562,15 @@ class _TestComputeNodeObject(object):
def test_update_from_virt_driver_uuid_already_set(self):
"""Tests update_from_virt_driver where the compute node object already
- has a uuid value so the uuid from the virt driver is ignored.
+ has a uuid value so an error is raised.
"""
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
# Emulate the ironic driver which adds a uuid field.
resources['uuid'] = uuidsentinel.node_uuid
compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
- compute.update_from_virt_driver(resources)
- expected = fake_compute_with_resources.obj_clone()
- expected.uuid = uuidsentinel.something_else
- self.assertTrue(base.obj_equal_prims(expected, compute))
+ self.assertRaises(exception.InvalidNodeConfiguration,
+ compute.update_from_virt_driver, resources)
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 271c943025..58b9859234 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -14,7 +14,6 @@
import collections
from unittest import mock
-import fixtures
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -431,13 +430,8 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
- # TODO(gibi): replace this with setting the config
- # [scheduler]pci_in_placement=True once that flag is available
- @mock.patch(
- 'nova.objects.request_spec.RequestSpec._pci_in_placement_enabled',
- new=mock.Mock(return_value=True),
- )
def test_from_components_flavor_based_pci_requests(self):
+ self.flags(group='filter_scheduler', pci_in_placement=True)
ctxt = context.RequestContext(
fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
)
@@ -1119,18 +1113,10 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
def setUp(self):
super().setUp()
- # TODO(gibi): replace this with setting the config
- # [scheduler]pci_in_placement=True once that flag is available
- self.mock_pci_in_placement_enabled = self.useFixture(
- fixtures.MockPatch(
- "nova.objects.request_spec.RequestSpec."
- "_pci_in_placement_enabled",
- return_value=True,
- )
- ).mock
+ self.flags(group='filter_scheduler', pci_in_placement=True)
def test_pci_reqs_ignored_if_disabled(self):
- self.mock_pci_in_placement_enabled.return_value = False
+ self.flags(group='filter_scheduler', pci_in_placement=False)
spec = request_spec.RequestSpec(
requested_resources=[],
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index 2d48513e0e..7eb43a05f4 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -12,7 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
from unittest import mock
from oslo_config import cfg
@@ -1039,9 +1039,9 @@ class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
),
]
self.flags(device_spec=device_spec, group="pci")
- dev_filter = whitelist.Whitelist(device_spec)
+ self.dev_filter = whitelist.Whitelist(device_spec)
self.pci_stats = stats.PciDeviceStats(
- objects.NUMATopology(), dev_filter=dev_filter
+ objects.NUMATopology(), dev_filter=self.dev_filter
)
# add devices represented by different RPs in placement
# two VFs on the same PF
@@ -1384,6 +1384,101 @@ class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
{pool['rp_uuid'] for pool in self.pci_stats.pools},
)
+ def _create_two_pools_with_two_vfs(self):
+ # create two pools (PFs) with two VFs each
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ for pf_index in [1, 2]:
+ for vf_index in [1, 2]:
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address=f"0000:81:0{pf_index}.{vf_index}",
+ parent_addr=f"0000:81:0{pf_index}.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(dev)
+ dev.extra_info = {'rp_uuid': getattr(uuids, f"pf{pf_index}")}
+
+ # populate the RP -> pool mapping from the assigned devices to their pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 2 pools and 4 devs in total
+ self.num_pools = 2
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 4
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_apply_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate where 1 VF
+ # is consumed from PF1 and two from PF2
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.vf_req}-1": [uuids.pf2],
+ f"{uuids.vf_req}-2": [uuids.pf2],
+ }
+ # This should fit
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req], mapping)
+ )
+ # and when the request is consumed, the pool consumption should be
+ # in sync with the placement allocation. So the PF2 pool is expected
+ # to disappear as it is fully consumed, and the PF1 pool should have
+ # one free device.
+ self.pci_stats.apply_requests([vf_req], mapping)
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(uuids.pf1, self.pci_stats.pools[0]['rp_uuid'])
+ self.assertEqual(1, self.pci_stats.pools[0]['count'])
+
+ def test_consume_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # In placement, 1 VF is allocated from PF1 and two from PF2.
+ "rp_uuids": ",".join([uuids.pf1, uuids.pf2, uuids.pf2])
+ }
+ ],
+ )
+
+ # So when the PCI claim consumes devices based on this request we
+ # expect that nova follows what is allocated in placement.
+ devs = self.pci_stats.consume_requests([vf_req])
+ self.assertEqual(
+ {"0000:81:01.0": 1, "0000:81:02.0": 2},
+ collections.Counter(dev.parent_addr for dev in devs),
+ )
+
def test_consume_restricted_by_allocation(self):
pf_req = objects.InstancePCIRequest(
count=1,
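For context, the placement mapping used by these tests keys each member of a numbered request group ("<request_id>-<index>") to the resource provider that satisfied it, so the per-pool consumption follows from simply counting providers. A small illustrative example with hypothetical UUID strings:

import collections

# Candidate for a count=3 VF request with request_id 'req':
mapping = {
    'req-0': ['pf1-rp-uuid'],
    'req-1': ['pf2-rp-uuid'],
    'req-2': ['pf2-rp-uuid'],
}
# One VF is consumed from PF1 and two from PF2.
per_rp = collections.Counter(rps[0] for rps in mapping.values())
assert per_rp == {'pf1-rp-uuid': 1, 'pf2-rp-uuid': 2}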
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index 68a051b26c..7490441d92 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -58,6 +58,16 @@ class BasePolicyTest(test.TestCase):
def setUp(self):
super(BasePolicyTest, self).setUp()
+ # TODO(gmann): enforce_scope and enforce_new_defaults are enabled
+ # by default in the code so disable them in base test class until
+ # we have deprecated rules and their tests. We have enforce_scope
+ # and no-legacy tests which are explicitly enabling scope and new
+ # defaults to test the new defaults and scope. In the future, once
+ # we remove the deprecated rules and refactor the unit tests, we
+ # can stop overriding the oslo policy flags.
+ self.flags(enforce_scope=False, group="oslo_policy")
+ if not self.without_deprecated_rules:
+ self.flags(enforce_new_defaults=False, group="oslo_policy")
self.useFixture(fixtures.NeutronFixture(self))
self.policy = self.useFixture(fixtures.RealPolicyFixture())
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index ddc8241003..b9e4c29dba 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -103,7 +103,7 @@ class EvacuatePolicyTest(base.BasePolicyTest):
evacuate_mock.assert_called_once_with(
self.user_req.environ['nova.context'],
mock.ANY, 'my-host', False,
- 'MyNewPass', None)
+ 'MyNewPass', None, None)
class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index 6c30e73571..1a7daa515f 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -1562,12 +1562,14 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
- numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
- fake_numa_topology,
- limits=None, pci_requests=None,
- pci_stats=None,
- provider_mapping=None,
- )
+ numa_fit_mock.assert_called_once_with(
+ fake_host_numa_topology,
+ fake_numa_topology,
+ limits=None,
+ pci_requests=None,
+ pci_stats=None,
+ provider_mapping=None,
+ )
numa_usage_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 4e7c0dc008..e7866069b3 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -2047,6 +2047,8 @@ class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
alloc_reqs_by_rp_uuid[uuids.host1] = [
{
"mappings": {
+ # This is odd but the un-named request group uses "" as the
+ # name of the group.
"": [uuids.host1],
uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
}
@@ -2071,7 +2073,8 @@ class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
self.assertEqual(1, len(selections))
selection = selections[0]
self.assertEqual(uuids.host1, selection.compute_node_uuid)
- # we expect that host1_child2 candidate is selected
+ # we expect that the host1_child2 candidate is selected as the
+ # DropFirstFilter will drop host1_child1
expected_a_c = {
"mappings": {
"": [uuids.host1],
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
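These tests imply the following shape for the helpers in nova/filesystem.py; this is a minimal sketch reconstructed from the assertions (the SYS value is an assumption), not the module itself:

import os

from nova import exception

SYS = '/sys'  # assumption: the tests only rely on os.path.join(SYS, path)


def read_sys(path):
    try:
        with open(os.path.join(SYS, path), mode='r') as fd:
            return fd.read()
    except OSError as exc:
        raise exception.FileNotFound(file_path=path) from exc


def write_sys(path, data):
    try:
        with open(os.path.join(SYS, path), mode='w') as fd:
            fd.write(data)
    except OSError as exc:
        raise exception.FileNotFound(file_path=path) from exc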
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index 871e836d87..752b872381 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -303,10 +303,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
- self.non_admin_context = context.RequestContext('fake', 'fake',
- roles=['member'])
- self.admin_context = context.RequestContext('fake', 'fake', True,
- roles=['admin', 'member'])
+ self.non_admin_context = context.RequestContext(
+ 'fake', 'fake', roles=['member', 'reader'])
+ self.admin_context = context.RequestContext(
+ 'fake', 'fake', True, roles=['admin', 'member', 'reader'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
@@ -387,6 +387,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-hypervisors:search",
"os_compute_api:os-hypervisors:servers",
"os_compute_api:limits:other_project",
+"os_compute_api:os-flavor-access",
)
self.admin_or_owner_rules = (
@@ -440,7 +441,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete:restore",
"os_compute_api:os-deferred-delete:force",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ips:add",
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index 3fe56013bd..40a914b5f7 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -214,20 +214,20 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client(self, mock_get, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -253,21 +253,21 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client_profiler_enabled(self, mock_client, mock_ser,
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -432,11 +432,11 @@ class TestProfilerRequestContextSerializer(test.NoDBTestCase):
class TestClientRouter(test.NoDBTestCase):
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
@@ -444,7 +444,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
# verify a client was created by ClientRouter
- mock_rpcclient.assert_called_once_with(
+ mock_get.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
@@ -452,11 +452,11 @@ class TestClientRouter(test.NoDBTestCase):
# verify cell client was returned
self.assertEqual(cell_client, client)
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance_untargeted(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance_untargeted(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
@@ -464,7 +464,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
- self.assertFalse(mock_rpcclient.called)
+ self.assertFalse(mock_get.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index 9fb6fa1c40..acc1aeca7f 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -128,7 +128,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
# init_host is called before any service record is created
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host, self.binary)
mock_create.assert_called_once_with()
@@ -186,7 +186,7 @@ class ServiceTestCase(test.NoDBTestCase):
mock_create.side_effect = ex
serv.manager = mock_manager
self.assertRaises(test.TestingException, serv.start)
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(None)
mock_get_by_host_and_binary.assert_has_calls([
mock.call(mock.ANY, self.host, self.binary),
mock.call(mock.ANY, self.host, self.binary)])
@@ -216,7 +216,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host,
self.binary)
@@ -241,7 +241,8 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(
+ mock_svc_get_by_host_and_binary.return_value)
serv.stop()
serv.manager.cleanup_host.assert_called_with()
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 62005de525..135558e145 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -40,6 +40,7 @@ class FakeMount(object):
class APITestCase(test.NoDBTestCase):
+ @mock.patch('nova.virt.disk.vfs.guestfs.VFSGuestFS', new=mock.Mock())
def test_can_resize_need_fs_type_specified(self):
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
diff --git a/nova/tests/unit/virt/libvirt/cpu/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..d47b3690a3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
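Taken together, these tests describe a thin Core wrapper around the helpers in nova.virt.libvirt.cpu.core. A hedged sketch of what such a class might look like, inferred from the assertions rather than the actual api module:

import nova.conf
from nova.virt.libvirt.cpu import core

CONF = nova.conf.CONF


class Core:
    """Illustrative wrapper for a single host CPU core."""

    def __init__(self, ident):
        self.ident = ident

    def __hash__(self):
        return hash(self.ident)

    @property
    def online(self):
        return core.get_online(self.ident)

    @online.setter
    def online(self, state):
        if state:
            core.set_online(self.ident)
        else:
            core.set_offline(self.ident)

    @property
    def governor(self):
        return core.get_governor(self.ident)

    def set_low_governor(self):
        core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)

    def set_high_governor(self):
        core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)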
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
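The expectations encoded above translate roughly into the following sysfs helpers; a sketch only (the path constants are assumptions), not the real nova.virt.libvirt.cpu.core module:

import os

from oslo_log import log as logging

from nova import exception
from nova import filesystem
from nova.virt import hardware

LOG = logging.getLogger(__name__)

AVAILABLE_PATH = '/sys/devices/system/cpu/present'         # assumption
CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'  # assumption


def get_available_cores():
    cores = filesystem.read_sys(AVAILABLE_PATH)
    return hardware.parse_cpu_spec(cores) if cores else set()


def exists(ident):
    return ident in get_available_cores()


def gen_cpu_path(ident):
    if not exists(ident):
        LOG.warning('Unable to access CPU: %s', ident)
        raise ValueError('CPU is not available')
    return CPU_PATH_TEMPLATE % {'core': ident}


def get_online(ident):
    try:
        return filesystem.read_sys(
            os.path.join(gen_cpu_path(ident), 'online')).strip() == '1'
    except exception.FileNotFound:
        # Cores without an 'online' knob (e.g. cpu0) are always online.
        return True


def set_online(ident):
    filesystem.write_sys(os.path.join(gen_cpu_path(ident), 'online'), data='1')
    return get_online(ident)


def set_offline(ident):
    filesystem.write_sys(os.path.join(gen_cpu_path(ident), 'online'), data='0')
    return not get_online(ident)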
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index b7d46c8ca2..139299e1fa 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -817,6 +817,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Driver capabilities for 'supports_socket_pci_numa_affinity' "
"is invalid",
)
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption'],
+ "Driver capabilities for 'supports_ephemeral_encryption' "
+ "is invalid",
+ )
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption_luks'],
+ "Driver capabilities for 'supports_ephemeral_encryption_luks' "
+ " is invalid",
+ )
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
@@ -1320,7 +1330,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_model(self, mocked_compare):
mocked_compare.side_effect = (2, 0)
self.flags(cpu_mode="custom",
@@ -1333,6 +1344,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_register_all_undefined_instance_details',
new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ def test__check_cpu_compatibility_skip_compare_at_init(
+ self, mocked_compare
+ ):
+ self.flags(group='workarounds', skip_cpu_compare_at_startup=True)
+ self.flags(cpu_mode="custom",
+ cpu_models=["Icelake-Server-noTSX"],
+ cpu_model_extra_flags = ["-mpx"],
+ group="libvirt")
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+ mocked_compare.assert_not_called()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_with_flag(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1341,9 +1368,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["qemu64"],
cpu_model_extra_flags = ["avx", "avx2"],
@@ -1352,11 +1380,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
# here, and in the surrounding similar tests, the non-zero error
# code in the compareCPU() side effect indicates error
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["Broadwell-noTSX"],
cpu_model_extra_flags = ["a v x"],
@@ -1365,11 +1394,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_enabled_and_disabled_flags(
self, mocked_compare
):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(
cpu_mode="custom",
cpu_models=["Cascadelake-Server"],
@@ -1822,6 +1852,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_guest.set_user_password.assert_called_once_with("root", "123")
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ def test_qemu_announce_self(self, mock_get_guest):
+ # Enable the workaround; announce_self will be called 3 times by default
+ self.flags(enable_qemu_monitor_announce_self=True, group='workarounds')
+
+ mock_guest = mock.Mock(spec=libvirt_guest.Guest)
+ mock_get_guest.return_value = mock_guest
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._qemu_monitor_announce_self(mock_guest)
+
+ # Ensure that 3 calls are made, as defined by the
+ # enable_qemu_monitor_announce_self_retries option's default of 3
+ mock_guest.announce_self.assert_any_call()
+ self.assertEqual(3, mock_guest.announce_self.call_count)
+
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@@ -11210,7 +11256,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -11249,7 +11295,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file,
):
@@ -11289,7 +11335,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -11327,7 +11373,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file,
):
@@ -11359,7 +11405,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU',
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file,
@@ -11467,7 +11513,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file,
):
@@ -11508,7 +11554,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file,
):
@@ -11534,7 +11580,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(return_value.dst_wants_file_backed_memory)
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
@@ -11570,7 +11616,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for vif in result.vifs:
self.assertTrue(vif.supports_os_vif_delegation)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
@@ -11580,7 +11626,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
@@ -11617,7 +11663,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_AARCH64_CPU_COMPARE))
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
def test_compare_cpu_host_aarch64(self,
mock_compare,
mock_get_libversion,
@@ -11640,7 +11686,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_compare.assert_called_once_with(caps.host.cpu.to_xml())
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
@@ -11659,7 +11705,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
@@ -11671,7 +11717,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
jsonutils.dumps(_fake_cpu_info),
instance)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
@@ -14102,6 +14148,85 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main_monitoring_failed(self):
self._test_live_migration_main(mon_side_effect=Exception)
+ @mock.patch.object(host.Host, "get_connection", new=mock.Mock())
+ @mock.patch.object(utils, "spawn", new=mock.Mock())
+ @mock.patch.object(host.Host, "get_guest")
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths")
+ def _test_live_migration_monitor_job_stats_exception(
+ self, exc, mock_copy_disk_paths, mock_get_guest, expect_success=True
+ ):
+ # Verify behavior when various exceptions are raised inside of
+ # Guest.get_job_info() during live migration monitoring.
+ mock_domain = mock.Mock(fakelibvirt.virDomain)
+ guest = libvirt_guest.Guest(mock_domain)
+ mock_get_guest.return_value = guest
+
+ # First, raise the exception from jobStats(), then return "completed"
+ # to make sure we exit the monitoring loop.
+ guest._domain.jobStats.side_effect = [
+ exc,
+ {'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED},
+ ]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ post_method = mock.Mock()
+ migrate_data = mock.Mock()
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_paths.return_value = disks_to_copy
+
+ func = drvr._live_migration
+ args = (self.context, instance, mock.sentinel.dest, post_method,
+ mock.sentinel.recover_method, mock.sentinel.block_migration,
+ migrate_data)
+
+ if expect_success:
+ func(*args)
+ post_method.assert_called_once_with(
+ self.context, instance, mock.sentinel.dest,
+ mock.sentinel.block_migration, migrate_data
+ )
+ else:
+ actual_exc = self.assertRaises(
+ fakelibvirt.libvirtError, func, *args)
+ self.assertEqual(exc, actual_exc)
+
+ def test_live_migration_monitor_job_stats_no_domain(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'no domain',
+ error_code=fakelibvirt.VIR_ERR_NO_DOMAIN
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_op_invalid(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'operation invalid',
+ error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_no_ram_info_set(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'internal error',
+ error_message='migration was active, but no RAM info was set',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_internal_error(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ 'some other internal error',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=False)
+
@mock.patch('os.path.exists', return_value=False)
@mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
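The new monitoring tests above assert that certain jobStats() failures are treated as benign while anything else aborts the migration. A hedged sketch of that error filtering (illustrative only; the real handling lives in nova.virt.libvirt.guest/driver and may differ):

import libvirt


def tolerant_job_stats(domain):
    # Benign races while the migration is finishing are swallowed; any
    # other libvirt error propagates and fails the migration.
    try:
        return domain.jobStats()
    except libvirt.libvirtError as ex:
        code = ex.get_error_code()
        if code in (libvirt.VIR_ERR_NO_DOMAIN,
                    libvirt.VIR_ERR_OPERATION_INVALID):
            # The domain already completed or the job is gone.
            return {'type': libvirt.VIR_DOMAIN_JOB_NONE}
        if (code == libvirt.VIR_ERR_INTERNAL_ERROR and
                'migration was active, but no RAM info was set'
                in ex.get_error_message()):
            # Transient libvirt race seen while job stats are torn down.
            return {'type': libvirt.VIR_DOMAIN_JOB_NONE}
        raise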
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 0b80bde49f..c648108f56 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -103,19 +103,23 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
+ @mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
def _test_create_image(
self, path, disk_format, disk_size, mock_info, mock_execute,
- backing_file=None
+ mock_ntf, backing_file=None, encryption=None
):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
cluster_size=mock.sentinel.cluster_size,
)
+ fh = mock_ntf.return_value.__enter__.return_value
libvirt_utils.create_image(
- path, disk_format, disk_size, backing_file=backing_file)
+ path, disk_format, disk_size, backing_file=backing_file,
+ encryption=encryption,
+ )
cow_opts = []
@@ -130,9 +134,32 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
f'cluster_size={mock.sentinel.cluster_size}',
]
+ encryption_opts = []
+
+ if encryption:
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={fh.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
expected_args = (
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
- disk_format, *cow_opts, path,
+ disk_format, *cow_opts, *encryption_opts, path,
)
if disk_size is not None:
expected_args += (disk_size,)
@@ -159,6 +186,16 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
backing_file=mock.sentinel.backing_file,
)
+ def test_create_image_encryption(self):
+ encryption = {
+ 'secret': 'a_secret',
+ 'format': 'luks',
+ }
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ encryption=encryption,
+ )
+
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
'default_eph_format': None,
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 58581d93ba..62a61c1e8b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -16,6 +16,8 @@ import os
from unittest import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
diff --git a/nova/tests/unit/virt/test_node.py b/nova/tests/unit/virt/test_node.py
new file mode 100644
index 0000000000..668b762520
--- /dev/null
+++ b/nova/tests/unit/virt/test_node.py
@@ -0,0 +1,142 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils.fixture import uuidsentinel as uuids
+import testtools
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.virt import node
+
+CONF = cfg.CONF
+
+
+# NOTE(danms): We do not inherit from test.TestCase because we need
+# our node methods not stubbed out in order to exercise them.
+class TestNodeIdentity(testtools.TestCase):
+ def flags(self, **kw):
+ """Override flag variables for a test."""
+ group = kw.pop('group', None)
+ for k, v in kw.items():
+ CONF.set_override(k, v, group)
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.identity_file = os.path.join(self.tempdir, node.COMPUTE_ID_FILE)
+ self.fake_config_files = ['%s/etc/nova.conf' % self.tempdir,
+ '%s/etc/nova/nova.conf' % self.tempdir,
+ '%s/opt/etc/nova/nova.conf' % self.tempdir]
+ for fn in self.fake_config_files:
+ os.makedirs(os.path.dirname(fn))
+ self.flags(state_path=self.tempdir,
+ config_file=self.fake_config_files)
+ node.LOCAL_NODE_UUID = None
+
+ def test_generate_local_node_uuid(self):
+ node_uuid = uuids.node
+ node.write_local_node_uuid(node_uuid)
+
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'anything')
+ self.assertIn(
+ 'Identity file %s appeared unexpectedly' % self.identity_file,
+ str(e))
+
+ def test_generate_local_node_uuid_unexpected_open_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_open.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_generate_local_node_uuid_unexpected_write_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_write = mock_open.return_value.__enter__.return_value.write
+ mock_write.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_get_local_node_uuid_simple_exists(self):
+ node_uuid = uuids.node
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_exists_whitespace(self):
+ node_uuid = uuids.node
+ # Make sure we strip whitespace from the file contents
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ ' %s \n' % node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_generate(self):
+ self.assertIsNone(node.LOCAL_NODE_UUID)
+ node_uuid1 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid1, node.LOCAL_NODE_UUID)
+ node_uuid2 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid2, node.LOCAL_NODE_UUID)
+
+ # Make sure we got the same thing each time, and that it's a
+ # valid uuid. Since we provided no uuid, it must have been
+ # generated the first time and read/returned the second.
+ self.assertEqual(node_uuid1, node_uuid2)
+ uuid.UUID(node_uuid1)
+
+ # Try to read it directly to make sure the file was really
+ # created and with the right value.
+ self.assertEqual(node_uuid1, node.read_local_node_uuid())
+
+ def test_get_local_node_uuid_two(self):
+ node_uuid = uuids.node
+
+ # Write the uuid to two of our locations
+ for cf in (self.fake_config_files[0], self.fake_config_files[1]):
+ open(os.path.join(os.path.dirname(cf),
+ node.COMPUTE_ID_FILE), 'w').write(node_uuid)
+
+ # Make sure we got the expected uuid and that no exceptions
+ # were raised about the files disagreeing
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_two_mismatch(self):
+ node_uuids = [uuids.node1, uuids.node2]
+
+ # Write a different uuid to each file
+ for id, fn in zip(node_uuids, self.fake_config_files):
+ open(os.path.join(
+ os.path.dirname(fn),
+ node.COMPUTE_ID_FILE), 'w').write(id)
+
+ # Make sure we get an error that identifies the mismatching
+ # file with its uuid, as well as what we expected to find
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.get_local_node_uuid)
+ expected = ('UUID %s in %s does not match %s' % (
+ node_uuids[1],
+ os.path.join(os.path.dirname(self.fake_config_files[1]),
+ 'compute_id'),
+ node_uuids[0]))
+ self.assertIn(expected, str(e))
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 532ed1fa50..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -34,6 +34,7 @@ from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.virt import event as virtevent
+import nova.virt.node
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -1595,6 +1596,11 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
+
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 362bf89973..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -32,6 +32,7 @@ import fixtures
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
from nova.compute import power_state
@@ -48,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -160,8 +162,8 @@ class FakeDriver(driver.ComputeDriver):
self._host = host
# NOTE(gibi): this is unnecessary complex and fragile but this is
# how many current functional sample tests expect the node name.
- self._nodes = (['fake-mini'] if self._host == 'compute'
- else [self._host])
+ self._set_nodes(['fake-mini'] if self._host == 'compute'
+ else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
@@ -504,6 +506,12 @@ class FakeDriver(driver.ComputeDriver):
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+ # NOTE(danms): Because the fake driver runs on the same host
+ # in tests, potentially with multiple nodes, we need to
+ # control our node uuids. Make sure we return a unique and
+ # consistent uuid for each node we are responsible for, to keep
+ # the persistent local node identity from taking over.
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -646,6 +654,10 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
+
def instance_on_disk(self, instance):
return False
@@ -764,7 +776,7 @@ class PredictableNodeUUIDDriver(SmallFakeDriver):
PredictableNodeUUIDDriver, self).get_available_resource(nodename)
# This is used in ComputeNode.update_from_virt_driver which is called
# from the ResourceTracker when creating a ComputeNode.
- resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename)
+ resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
return resources
@@ -1119,3 +1131,22 @@ class EphEncryptionDriverPLAIN(MediumFakeDriver):
FakeDriver.capabilities,
supports_ephemeral_encryption=True,
supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+ This behaves like a real virt driver from the perspective of its
+ nodes, with a stable nodename and use of the global node identity
+ machinery to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not len(types):
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
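For reference, the new check above is driven entirely by the ``[compute] vmdk_allowed_types`` option exercised in the unit tests earlier in this change. A minimal nova.conf sketch (the values simply mirror those used in the tests; restrict the list to the VMDK subformats you actually trust):

    [compute]
    vmdk_allowed_types = streamOptimized,monolithicSparse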
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 117294ff89..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -839,6 +839,13 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
diff --git a/nova/virt/libvirt/cpu/__init__.py b/nova/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..962c9469a0
--- /dev/null
+++ b/nova/virt/libvirt/cpu/__init__.py
@@ -0,0 +1,16 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.libvirt.cpu import api
+
+
+Core = api.Core
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..e0b0a277d1
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+ It may be a physical CPU core or a hardware thread on a shared CPU core
+ depending on if the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+ raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
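A rough usage sketch of the new ``nova.virt.libvirt.cpu`` API follows; it is not part of the change, the core id is hypothetical, and the sysfs writes rely on the privsep context set up by nova-compute:

    from nova.virt.libvirt import cpu

    core = cpu.Core(4)          # wrap hardware thread / core number 4
    if not core.online:
        core.online = True      # writes '1' to .../cpu4/online via privsep
    core.set_low_governor()     # applies CONF.libvirt.cpu_power_governor_low
    print(core.governor)        # reads cpufreq/scaling_governor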
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 8b36fea3c9..b9a389910d 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -412,6 +412,8 @@ class LibvirtDriver(driver.ComputeDriver):
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
@@ -439,6 +441,10 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
"supports_socket_pci_numa_affinity": True,
+ "supports_ephemeral_encryption":
+ self.image_backend.backend().SUPPORTS_LUKS,
+ "supports_ephemeral_encryption_luks":
+ self.image_backend.backend().SUPPORTS_LUKS,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -463,7 +469,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
@@ -984,33 +989,26 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
- cpu = vconfig.LibvirtConfigGuestCPU()
- for model in models:
- cpu.model = self._get_cpu_model_mapping(model)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured CPU model: %(model)s is not "
- "compatible with host CPU. Please correct your "
- "config and try again. %(e)s") % {
- 'model': model, 'e': e})
- raise exception.InvalidCPUInfo(msg)
-
- # Use guest CPU model to check the compatibility between guest CPU and
- # configured extra_flags
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = self._host.get_capabilities().host.cpu.model
- for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
- cpu_feature = self._prepare_cpu_flag(flag)
- cpu.add_feature(cpu_feature)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured extra flag: %(flag)s it not correct, or "
- "the host CPU does not support this flag. Please "
- "correct the config and try again. %(e)s") % {
- 'flag': flag, 'e': e})
- raise exception.InvalidCPUInfo(msg)
+ if not CONF.workarounds.skip_cpu_compare_at_startup:
+ # Use guest CPU model to check the compatibility between
+ # guest CPU and configured extra_flags
+ for model in models:
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.model = self._get_cpu_model_mapping(model)
+ for flag in set(x.lower() for
+ x in CONF.libvirt.cpu_model_extra_flags):
+ cpu_feature = self._prepare_cpu_flag(flag)
+ cpu.add_feature(cpu_feature)
+ try:
+ self._compare_cpu(cpu, self._get_cpu_info(), None)
+ except exception.InvalidCPUInfo as e:
+ msg = (_("Configured CPU model: %(model)s "
+ "and CPU Flags %(flags)s ar not "
+ "compatible with host CPU. Please correct your "
+ "config and try again. %(e)s") % {
+ 'model': model, 'e': e,
+ 'flags': CONF.libvirt.cpu_model_extra_flags})
+ raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
@@ -9522,6 +9520,7 @@ class LibvirtDriver(driver.ComputeDriver):
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
+ data["uuid"] = self._host.get_node_uuid()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
@@ -9975,7 +9974,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
- ret = self._host.compare_cpu(cpu_xml)
+ ret = self._host.compare_hypervisor_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
@@ -10067,6 +10066,24 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: instance object that is in migration
"""
+ current = eventlet.getcurrent()
+ # NOTE(gibi): not all eventlet spawns are under our control, so
+ # there can be threads without test_case_id set; find the first
+ # ancestor that was spawned from nova.utils.spawn[_n] and
+ # therefore has the id set.
+ while (
+ current is not None and
+ not getattr(current, 'test_case_id', None)
+ ):
+ current = current.parent
+
+ if current is not None:
+ LOG.warning(
+ "!!!---!!! live_migration_abort thread was spawned by "
+ "TestCase ID: %s. If you see this in a failed functional test "
+ "then please let #openstack-nova on IRC know about it. "
+ "!!!---!!!", current.test_case_id
+ )
guest = self._host.get_guest(instance)
dom = guest._domain
@@ -11058,16 +11075,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.workarounds.enable_qemu_monitor_announce_self:
return
- LOG.info('Sending announce-self command to QEMU monitor',
- instance=instance)
+ current_attempt = 0
- try:
- guest = self._host.get_guest(instance)
- guest.announce_self()
- except Exception:
- LOG.warning('Failed to send announce-self command to QEMU monitor',
- instance=instance)
- LOG.exception()
+ max_attempts = (
+ CONF.workarounds.qemu_monitor_announce_self_count)
+ # qemu_monitor_announce_self_interval is specified in seconds
+ announce_pause = (
+ CONF.workarounds.qemu_monitor_announce_self_interval)
+
+ while current_attempt < max_attempts:
+ # Increment attempt
+ current_attempt += 1
+
+ # Only use announce_pause after the first attempt to avoid
+ # pausing before calling announce_self for the first attempt
+ if current_attempt != 1:
+ greenthread.sleep(announce_pause)
+
+ LOG.info('Sending announce-self command to QEMU monitor. '
+ 'Attempt %(current_attempt)s of %(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ try:
+ guest = self._host.get_guest(instance)
+ guest.announce_self()
+ except Exception:
+ LOG.warning('Failed to send announce-self command to '
+ 'QEMU monitor. Attempt %(current_attempt)s of '
+ '%(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ LOG.exception()
def post_live_migration_at_destination(self, context,
instance,
@@ -11317,6 +11355,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
+
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 46435a9a7f..b986702401 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -66,6 +66,7 @@ from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
+import nova.virt.node # noqa
if ty.TYPE_CHECKING:
import libvirt
@@ -138,6 +139,7 @@ class Host(object):
self._caps = None
self._domain_caps = None
self._hostname = None
+ self._node_uuid = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
@@ -1059,6 +1061,12 @@ class Host(object):
{'old': self._hostname, 'new': hostname})
return self._hostname
+ def get_node_uuid(self):
+ """Returns the UUID of this node."""
+ if not self._node_uuid:
+ self._node_uuid = nova.virt.node.get_local_node_uuid()
+ return self._node_uuid
+
def find_secret(self, usage_type, usage_id):
"""Find a secret.
@@ -1605,6 +1613,22 @@ class Host(object):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
+ def compare_hypervisor_cpu(self, xmlDesc, flags=0):
+ """Compares the given CPU description with the CPU provided by
+ the host hypervisor. This is different from the older method,
+ compare_cpu(), which compares a given CPU definition with the
+ host CPU without considering the abilities of the host
+ hypervisor. Except for @xmlDesc, all other parameters to the
+ compareHypervisorCPU API are optional (libvirt will choose
+ sensible defaults).
+ """
+ emulator = None
+ arch = None
+ machine = None
+ virttype = None
+ return self.get_connection().compareHypervisorCPU(
+ emulator, arch, machine, virttype, xmlDesc, flags)
+
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 534cc60759..0a64ef43dd 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -81,6 +81,7 @@ def _update_utime_ignore_eacces(path):
class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
+ SUPPORTS_LUKS = False
def __init__(
self,
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 0675e4ac14..adb2ec45a1 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,6 +22,7 @@ import grp
import os
import pwd
import re
+import tempfile
import typing as ty
import uuid
@@ -114,6 +115,7 @@ def create_image(
disk_format: str,
disk_size: ty.Optional[ty.Union[str, int]],
backing_file: ty.Optional[str] = None,
+ encryption: ty.Optional[ty.Dict[str, ty.Any]] = None
) -> None:
"""Disk image creation with qemu-img
:param path: Desired location of the disk image
@@ -125,15 +127,16 @@ def create_image(
If no suffix is given, it will be interpreted as bytes.
Can be None in the case of a COW image.
:param backing_file: (Optional) Backing file to use.
+ :param encryption: (Optional) Dict detailing various encryption attributes
+ such as the format and passphrase.
"""
- base_cmd = [
+ cmd = [
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
]
- cow_opts = []
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += [
+ cow_opts = [
f'backing_file={backing_file}',
f'backing_fmt={base_details.file_format}'
]
@@ -147,12 +150,60 @@ def create_image(
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
- cow_opts = ['-o', csv_opts]
-
- cmd = base_cmd + cow_opts + [path]
- if disk_size is not None:
- cmd += [str(disk_size)]
- processutils.execute(*cmd)
+ cmd += ['-o', csv_opts]
+
+ # Disk size can be None in the case of a COW image
+ disk_size_arg = [str(disk_size)] if disk_size is not None else []
+
+ if encryption:
+ with tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8') as f:
+ # Write out the passphrase secret to a temp file
+ f.write(encryption.get('secret'))
+
+ # Ensure the secret is written to disk, we can't .close() here as
+ # that removes the file when using NamedTemporaryFile
+ f.flush()
+
+ # The basic options include the secret and encryption format
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={f.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+ # Supported luks options:
+ # cipher-alg=<str> - Name of cipher algorithm and key length
+ # cipher-mode=<str> - Name of encryption cipher mode
+ # hash-alg=<str> - Name of hash algorithm to use for PBKDF
+ # iter-time=<num> - Time to spend in PBKDF in milliseconds
+ # ivgen-alg=<str> - Name of IV generator algorithm
+ # ivgen-hash-alg=<str> - Name of IV generator hash algorithm
+ #
+ # NOTE(melwitt): Sensible defaults (that match the qemu defaults)
+ # are hardcoded at this time for simplicity and consistency when
+ # instances are migrated. Configuration of luks options could be
+ # added in a future release.
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ # We need to execute the command while the NamedTemporaryFile still
+ # exists
+ cmd += encryption_opts + [path] + disk_size_arg
+ processutils.execute(*cmd)
+ else:
+ cmd += [path] + disk_size_arg
+ processutils.execute(*cmd)
def create_ploop_image(
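A short sketch (an assumed caller, not part of this change) of how the new ``encryption`` argument is expected to be used; the path and passphrase are placeholders, and only the ``secret`` and ``format`` keys are read by ``create_image()``:

    from nova.virt.libvirt import utils as libvirt_utils

    libvirt_utils.create_image(
        '/var/lib/nova/instances/some-instance/disk.eph0',  # placeholder path
        'qcow2',
        '1G',
        encryption={'secret': 'passphrase-from-key-manager',
                    'format': 'luks'},
    )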
diff --git a/nova/virt/node.py b/nova/virt/node.py
new file mode 100644
index 0000000000..4cb3d0a573
--- /dev/null
+++ b/nova/virt/node.py
@@ -0,0 +1,108 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import uuid
+
+from oslo_utils import uuidutils
+
+import nova.conf
+from nova import exception
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+COMPUTE_ID_FILE = 'compute_id'
+LOCAL_NODE_UUID = None
+
+
+def write_local_node_uuid(node_uuid):
+ # We only ever write an identity file in the CONF.state_path
+ # location
+ fn = os.path.join(CONF.state_path, COMPUTE_ID_FILE)
+
+ # Try to create the identity file and write our uuid into it. Fail
+ # if the file exists (since it shouldn't if we made it here).
+ try:
+ with open(fn, 'x') as f:
+ f.write(node_uuid)
+ except FileExistsError:
+ # If the file exists, we must either fail or re-survey all the
+ # potential files. If we just read and return it, it could be
+ # inconsistent with files in the other locations.
+ raise exception.InvalidNodeConfiguration(
+ reason='Identity file %s appeared unexpectedly' % fn)
+ except Exception as e:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to write uuid to %s: %s' % (fn, e))
+
+ LOG.info('Wrote node identity %s to %s', node_uuid, fn)
+
+
+def read_local_node_uuid():
+ locations = ([os.path.dirname(f) for f in CONF.config_file] +
+ [CONF.state_path])
+
+ uuids = []
+ found = []
+ for location in locations:
+ fn = os.path.join(location, COMPUTE_ID_FILE)
+ try:
+ # UUIDs should be 36 characters in canonical format. Read
+ # a little more to be graceful about whitespace in/around
+ # the actual value we want to read. However, it must parse
+ # to a legit UUID once we strip the whitespace.
+ with open(fn) as f:
+ content = f.read(40)
+ node_uuid = str(uuid.UUID(content.strip()))
+ except FileNotFoundError:
+ continue
+ except ValueError:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to parse UUID from %s' % fn)
+ uuids.append(node_uuid)
+ found.append(fn)
+
+ if uuids:
+ # Any identities we found must be consistent, or we fail
+ first = uuids[0]
+ for i, (node_uuid, fn) in enumerate(zip(uuids, found)):
+ if node_uuid != first:
+ raise exception.InvalidNodeConfiguration(
+ reason='UUID %s in %s does not match %s' % (
+ node_uuid, fn, uuids[i - 1]))
+ LOG.info('Determined node identity %s from %s', first, found[0])
+ return first
+ else:
+ return None
+
+
+def get_local_node_uuid():
+ """Read or create local node uuid file.
+
+ :returns: UUID string read from file, or generated
+ """
+ global LOCAL_NODE_UUID
+
+ if LOCAL_NODE_UUID is not None:
+ return LOCAL_NODE_UUID
+
+ node_uuid = read_local_node_uuid()
+ if not node_uuid:
+ node_uuid = uuidutils.generate_uuid()
+ LOG.info('Generated node identity %s', node_uuid)
+ write_local_node_uuid(node_uuid)
+
+ LOCAL_NODE_UUID = node_uuid
+ return node_uuid
diff --git a/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
new file mode 100644
index 0000000000..6c5bc98046
--- /dev/null
+++ b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Starting with microversion 2.95, any evacuated instance will be stopped at
+ the destination. The required minimum version for Nova computes is
+ 27.0.0 (Antelope 2023.1). Operators can still continue using the
+ previous behavior by selecting a microversion below v2.95.
+upgrade:
+ - |
+ Operators will have to consider upgrading compute hosts to Nova
+ 27.0.0 (Antelope 2023.1) in order to take advantage of the new
+ (microversion v2.95) evacuate API behavior. An exception will be
+ raised for older versions.
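As a rough illustration (endpoint, token and host names are placeholders), an evacuation that opts into the new stopped-at-destination behavior simply pins the microversion on the evacuate action:

    curl -X POST "$COMPUTE_API/servers/$SERVER_ID/action" \
        -H "X-Auth-Token: $TOKEN" \
        -H "Content-Type: application/json" \
        -H "X-OpenStack-Nova-API-Version: 2.95" \
        -d '{"evacuate": {"host": "target-compute"}}'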
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
new file mode 100644
index 0000000000..7a9e53ed26
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Since 26.0.0 (Zed) Nova supports tracking PCI devices in Placement. Now
+ Nova also supports scheduling flavor-based PCI device requests via
+ Placement. This support is disabled by default. Please read the
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
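A minimal enabling sketch follows; the option names are taken from the PCI-in-placement documentation referenced above and should be verified against your release:

    [pci]
    report_in_placement = True    # compute nodes: report PCI inventories to Placement

    [filter_scheduler]
    pci_in_placement = True       # schedulers: resolve flavor PCI requests via Placement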
diff --git a/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
new file mode 100644
index 0000000000..0941dd7450
--- /dev/null
+++ b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
@@ -0,0 +1,28 @@
+---
+fixes:
+ - |
+ Fixes `bug 1996995`_ in which VMs live migrated on certain VXLAN Arista
+ network fabrics were inaccessible until the switch ARP cache expired.
+
+ A Nova workaround option, ``enable_qemu_monitor_announce_self``, was added
+ to fix `bug 1815989`_; when enabled, it interacts with the QEMU
+ monitor and forces a VM to announce itself.
+
+ On certain network fabrics, VMs that are live migrated remain inaccessible
+ via the network despite the QEMU monitor announce_self command successfully
+ being called.
+
+ On Arista VXLAN fabrics in particular, testing showed that several
+ attempts of the QEMU announce_self monitor command were required before
+ the switch would acknowledge a VM's new location on the fabric.
+
+ This fix introduces two operator-configurable options.
+ The first option, ``[workarounds] qemu_monitor_announce_self_count``, sets
+ the number of times the QEMU monitor announce_self command is called.
+
+ The second option, ``[workarounds] qemu_monitor_announce_self_interval``,
+ sets the delay in seconds between subsequent runs of the announce_self
+ command.
+
+ .. _`bug 1996995`: https://bugs.launchpad.net/nova/+bug/1996995
+ .. _`bug 1815989`: https://bugs.launchpad.net/nova/+bug/1815989
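A nova.conf sketch combining the existing workaround with the two new options (the values shown are illustrative only, not recommendations):

    [workarounds]
    enable_qemu_monitor_announce_self = True
    qemu_monitor_announce_self_count = 3
    qemu_monitor_announce_self_interval = 1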
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
new file mode 100644
index 0000000000..72a6f861b6
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The Nova service now enables the new API policy (RBAC) defaults and scope
+ checks by default. The default values of the config options
+ ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults``
+ have been changed to ``True``.
+
+ This means that if you are using a system-scoped token to access the Nova
+ API, the request will fail with a 403 error code. Also, the new defaults
+ will be enforced by default. To learn about the new defaults of each policy
+ rule, refer to the `Policy New Defaults`_. For more detail about the Nova
+ API policy changes, refer to `Policy Concepts`_.
+
+ If you want to disable them, modify the below config options in the
+ ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/microversion-2-94-59649401d5763286.yaml b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
new file mode 100644
index 0000000000..d0927e6f75
--- /dev/null
+++ b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - |
+ The 2.94 microversion has been added. This microversion extends
+ microversion 2.90 by allowing Fully Qualified Domain Names (FQDN) wherever
+ the ``hostname`` can be specified. This applies when creating an
+ instance (``POST /servers``), updating an instance
+ (``PUT /servers/{id}``), or rebuilding an instance
+ (``POST /servers/{server_id}/action (rebuild)``). When using an FQDN as the
+ instance hostname, the ``[api]dhcp_domain`` configuration option must be
+ set to the empty string in order for the correct FQDN to appear in the
+ ``hostname`` field in the metadata API.
+
+upgrade:
+ - |
+ In order to make use of microversion 2.94's FQDN hostnames, the
+ ``[api]dhcp_domain`` config option must be set to the empty string. If
+ this is not done, the ``hostname`` field in the metadata API will be
+ incorrect, as it will include the value of ``[api]dhcp_domain`` appended to
+ the instance's FQDN. Note that simply not setting ``[api]dhcp_domain`` is
+ not enough, as it has a default value of ``novalocal``. It must explicitly
+ be set to the empty string.
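For example, the metadata behavior described above relies on an explicit empty value (not an absent one) in nova.conf:

    [api]
    dhcp_domain =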
diff --git a/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
new file mode 100644
index 0000000000..7e80059b80
--- /dev/null
+++ b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix rescuing a volume-backed instance by adding a check for the
+ 'hw_rescue_disk' and 'hw_rescue_device' properties in the image metadata
+ before attempting to rescue the instance.
diff --git a/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
new file mode 100644
index 0000000000..fdeb593bd2
--- /dev/null
+++ b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ The compute manager now uses a local file to provide node uuid persistence
+ to guard against problems with renamed services, among other things.
+ Deployers wishing to ensure that *new* compute services get a predictable
+ uuid before initial startup may provision that file and nova will use it;
+ otherwise nova will generate and write one to a `compute_id` file in
+ `CONF.state_path` the first time it starts up. Accidental renames of a
+ compute node's hostname will be detected and the manager will exit to avoid
+ database corruption. Note that none of this applies to Ironic computes, as
+ they manage nodes and uuids differently.
+upgrade:
+ - |
+ Existing compute nodes will, upon upgrade, persist the uuid of the compute
+ node assigned to their hostname at first startup. Since this must match
+ what is currently in the database, it is important to let nova provision
+ this file from its database. Nova will only persist to a `compute_id` file
+ in the `CONF.state_path` directory, which should already be writable.
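As a rough example of the pre-provisioning mentioned above, assuming the common state path of ``/var/lib/nova`` and a purely hypothetical uuid, the file simply contains the node uuid:

    echo "7b0a3a6e-4f0c-4b51-9b6e-2f51d3e9d7c1" > /var/lib/nova/compute_id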
diff --git a/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
new file mode 100644
index 0000000000..924e09a602
--- /dev/null
+++ b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Nova's use of libvirt's compareCPU() API has become error-prone as
+ it does not take the host hypervisor's capabilities into account. With
+ QEMU >= 2.9 and libvirt >= 4.4.0, libvirt will do the right thing in
+ terms of CPU comparison checks via a new replacement API,
+ compareHypervisorCPU(). Nova satisfies these minimum version
+ requirements of QEMU and libvirt by a good margin.
+
+ This change replaces the usage of the older API, compareCPU(), with the
+ new one, compareHypervisorCPU().
diff --git a/requirements.txt b/requirements.txt
index dc1c414aba..9954d06bc9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -42,8 +42,8 @@ oslo.upgradecheck>=1.3.0
oslo.utils>=4.12.1 # Apache-2.0
oslo.db>=10.0.0 # Apache-2.0
oslo.rootwrap>=5.15.0 # Apache-2.0
-oslo.messaging>=10.3.0 # Apache-2.0
-oslo.policy>=3.7.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
+oslo.policy>=3.11.0 # Apache-2.0
oslo.privsep>=2.6.2 # Apache-2.0
oslo.i18n>=5.1.0 # Apache-2.0
oslo.service>=2.8.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 8272774fa9..097edbe827 100644
--- a/tox.ini
+++ b/tox.ini
@@ -61,7 +61,7 @@ description =
# because we do not want placement present during unit tests.
deps =
{[testenv]deps}
- openstack-placement>=1.0.0
+ openstack-placement>=9.0.0.0b1
extras =
commands =
stestr --test-path=./nova/tests/functional run {posargs}