-rw-r--r--  .zuul.yaml | 75
-rw-r--r--  HACKING.rst | 1
-rw-r--r--  api-guide/source/accelerator-support.rst | 4
-rw-r--r--  api-guide/source/users.rst | 2
-rw-r--r--  api-ref/source/parameters.yaml | 3
-rw-r--r--  api-ref/source/servers-actions.inc | 5
-rw-r--r--  api-ref/source/servers.inc | 7
-rw-r--r--  doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json | 4
-rw-r--r--  doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json | 5
-rw-r--r--  doc/api_samples/servers/v2.94/server-action-rebuild-resp.json | 80
-rw-r--r--  doc/api_samples/servers/v2.94/server-action-rebuild.json | 15
-rw-r--r--  doc/api_samples/servers/v2.94/server-create-req.json | 30
-rw-r--r--  doc/api_samples/servers/v2.94/server-create-resp.json | 22
-rw-r--r--  doc/api_samples/servers/v2.94/server-get-resp.json | 81
-rw-r--r--  doc/api_samples/servers/v2.94/server-update-req.json | 8
-rw-r--r--  doc/api_samples/servers/v2.94/server-update-resp.json | 78
-rw-r--r--  doc/api_samples/servers/v2.94/servers-details-resp.json | 88
-rw-r--r--  doc/api_samples/servers/v2.94/servers-list-resp.json | 24
-rw-r--r--  doc/api_samples/versions/v21-version-get-resp.json | 2
-rw-r--r--  doc/api_samples/versions/versions-get-resp.json | 2
-rw-r--r--  doc/source/admin/architecture.rst | 2
-rw-r--r--  doc/source/admin/availability-zones.rst | 56
-rw-r--r--  doc/source/admin/compute-node-identification.rst | 83
-rw-r--r--  doc/source/admin/cpu-topologies.rst | 91
-rw-r--r--  doc/source/admin/huge-pages.rst | 2
-rw-r--r--  doc/source/admin/index.rst | 2
-rw-r--r--  doc/source/admin/live-migration-usage.rst | 2
-rw-r--r--  doc/source/admin/manage-logs.rst | 10
-rw-r--r--  doc/source/admin/pci-passthrough.rst | 37
-rw-r--r--  doc/source/admin/remote-console-access.rst | 16
-rw-r--r--  doc/source/admin/soft-delete-shadow-tables.rst | 62
-rw-r--r--  doc/source/admin/upgrades.rst | 20
-rw-r--r--  doc/source/cli/nova-compute.rst | 2
-rw-r--r--  doc/source/contributor/how-to-get-involved.rst | 4
-rw-r--r--  doc/source/contributor/process.rst | 10
-rw-r--r--  doc/source/user/wsgi.rst | 14
-rw-r--r--  etc/nova/nova-config-generator.conf | 1
-rw-r--r--  mypy-files.txt | 4
-rw-r--r--  nova/api/openstack/api_version_request.py | 6
-rw-r--r--  nova/api/openstack/compute/evacuate.py | 25
-rw-r--r--  nova/api/openstack/compute/flavor_access.py | 9
-rw-r--r--  nova/api/openstack/compute/remote_consoles.py | 3
-rw-r--r--  nova/api/openstack/compute/rest_api_version_history.rst | 20
-rw-r--r--  nova/api/openstack/compute/schemas/evacuate.py | 4
-rw-r--r--  nova/api/openstack/compute/schemas/servers.py | 14
-rw-r--r--  nova/api/openstack/compute/servers.py | 11
-rw-r--r--  nova/api/openstack/identity.py | 22
-rw-r--r--  nova/api/openstack/wsgi_app.py | 5
-rw-r--r--  nova/cmd/manage.py | 4
-rw-r--r--  nova/compute/api.py | 16
-rw-r--r--  nova/compute/claims.py | 25
-rw-r--r--  nova/compute/manager.py | 231
-rw-r--r--  nova/compute/pci_placement_translator.py | 74
-rw-r--r--  nova/compute/resource_tracker.py | 131
-rw-r--r--  nova/compute/rpcapi.py | 18
-rw-r--r--  nova/compute/utils.py | 27
-rw-r--r--  nova/compute/vm_states.py | 3
-rw-r--r--  nova/conductor/api.py | 6
-rw-r--r--  nova/conductor/manager.py | 24
-rw-r--r--  nova/conductor/rpcapi.py | 15
-rw-r--r--  nova/conductor/tasks/live_migrate.py | 2
-rw-r--r--  nova/conductor/tasks/migrate.py | 5
-rw-r--r--  nova/conf/api.py | 7
-rw-r--r--  nova/conf/compute.py | 9
-rw-r--r--  nova/conf/ironic.py | 1
-rw-r--r--  nova/conf/libvirt.py | 19
-rw-r--r--  nova/conf/mks.py | 2
-rw-r--r--  nova/conf/pci.py | 30
-rw-r--r--  nova/conf/scheduler.py | 21
-rw-r--r--  nova/conf/spice.py | 53
-rw-r--r--  nova/conf/vmware.py | 5
-rw-r--r--  nova/conf/workarounds.py | 44
-rw-r--r--  nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py | 35
-rw-r--r--  nova/db/main/models.py | 2
-rw-r--r--  nova/exception.py | 30
-rw-r--r--  nova/filesystem.py | 59
-rw-r--r--  nova/hacking/checks.py | 21
-rw-r--r--  nova/manager.py | 7
-rw-r--r--  nova/objects/compute_node.py | 15
-rw-r--r--  nova/objects/request_spec.py | 113
-rw-r--r--  nova/objects/service.py | 33
-rw-r--r--  nova/pci/request.py | 8
-rw-r--r--  nova/pci/stats.py | 283
-rw-r--r--  nova/policies/tenant_networks.py | 4
-rw-r--r--  nova/policy.py | 12
-rw-r--r--  nova/rpc.py | 16
-rw-r--r--  nova/scheduler/filters/__init__.py | 44
-rw-r--r--  nova/scheduler/filters/numa_topology_filter.py | 24
-rw-r--r--  nova/scheduler/filters/pci_passthrough_filter.py | 23
-rw-r--r--  nova/scheduler/host_manager.py | 34
-rw-r--r--  nova/scheduler/manager.py | 105
-rw-r--r--  nova/scheduler/utils.py | 11
-rw-r--r--  nova/service.py | 4
-rw-r--r--  nova/test.py | 23
-rw-r--r--  nova/tests/fixtures/__init__.py | 2
-rw-r--r--  nova/tests/fixtures/cinder.py | 21
-rw-r--r--  nova/tests/fixtures/filesystem.py | 81
-rw-r--r--  nova/tests/fixtures/libvirt.py | 6
-rw-r--r--  nova/tests/fixtures/libvirt_imagebackend.py | 3
-rw-r--r--  nova/tests/fixtures/nova.py | 79
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl | 5
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl | 5
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl | 80
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl | 15
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl | 21
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl | 22
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl | 81
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl | 8
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl | 78
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl | 88
-rw-r--r--  nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl | 24
-rw-r--r--  nova/tests/functional/api_sample_tests/test_evacuate.py | 52
-rw-r--r--  nova/tests/functional/api_sample_tests/test_remote_consoles.py | 20
-rw-r--r--  nova/tests/functional/api_sample_tests/test_servers.py | 45
-rw-r--r--  nova/tests/functional/compute/test_resource_tracker.py | 1
-rw-r--r--  nova/tests/functional/integrated_helpers.py | 23
-rw-r--r--  nova/tests/functional/libvirt/base.py | 8
-rw-r--r--  nova/tests/functional/libvirt/test_evacuate.py | 4
-rw-r--r--  nova/tests/functional/libvirt/test_pci_in_placement.py | 471
-rw-r--r--  nova/tests/functional/libvirt/test_pci_sriov_servers.py | 1141
-rw-r--r--  nova/tests/functional/libvirt/test_power_manage.py | 270
-rw-r--r--  nova/tests/functional/libvirt/test_vpmem.py | 6
-rw-r--r--  nova/tests/functional/notification_sample_tests/test_compute_task.py | 7
-rw-r--r--  nova/tests/functional/notification_sample_tests/test_instance.py | 20
-rw-r--r--  nova/tests/functional/regressions/test_bug_1669054.py | 3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1713783.py | 6
-rw-r--r--  nova/tests/functional/regressions/test_bug_1764883.py | 3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1823370.py | 3
-rw-r--r--  nova/tests/functional/regressions/test_bug_1896463.py | 2
-rw-r--r--  nova/tests/functional/regressions/test_bug_1922053.py | 6
-rw-r--r--  nova/tests/functional/regressions/test_bug_1951656.py | 73
-rw-r--r--  nova/tests/functional/regressions/test_bug_1980720.py | 68
-rw-r--r--  nova/tests/functional/test_instance_actions.py | 9
-rw-r--r--  nova/tests/functional/test_server_group.py | 57
-rw-r--r--  nova/tests/functional/test_server_rescue.py | 86
-rw-r--r--  nova/tests/functional/test_servers.py | 12
-rw-r--r--  nova/tests/functional/test_servers_resource_request.py | 22
-rw-r--r--  nova/tests/functional/test_service.py | 85
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_evacuate.py | 29
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavor_access.py | 25
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_remote_consoles.py | 12
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_groups.py | 97
-rw-r--r--  nova/tests/unit/api/openstack/fakes.py | 14
-rw-r--r--  nova/tests/unit/api/openstack/test_wsgi_app.py | 15
-rw-r--r--  nova/tests/unit/cmd/test_policy.py | 13
-rw-r--r--  nova/tests/unit/compute/test_api.py | 170
-rw-r--r--  nova/tests/unit/compute/test_claims.py | 6
-rw-r--r--  nova/tests/unit/compute/test_compute.py | 42
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 386
-rw-r--r--  nova/tests/unit/compute/test_pci_placement_translator.py | 87
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py | 215
-rw-r--r--  nova/tests/unit/compute/test_rpcapi.py | 48
-rw-r--r--  nova/tests/unit/compute/test_shelve.py | 6
-rw-r--r--  nova/tests/unit/compute/test_utils.py | 68
-rw-r--r--  nova/tests/unit/conductor/tasks/test_live_migrate.py | 2
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 33
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py | 4
-rw-r--r--  nova/tests/unit/db/main/test_migrations.py | 9
-rw-r--r--  nova/tests/unit/objects/test_compute_node.py | 17
-rw-r--r--  nova/tests/unit/objects/test_request_spec.py | 233
-rw-r--r--  nova/tests/unit/pci/test_request.py | 15
-rw-r--r--  nova/tests/unit/pci/test_stats.py | 950
-rw-r--r--  nova/tests/unit/policies/base.py | 10
-rw-r--r--  nova/tests/unit/policies/test_evacuate.py | 2
-rw-r--r--  nova/tests/unit/scheduler/filters/test_numa_topology_filters.py | 97
-rw-r--r--  nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py | 113
-rw-r--r--  nova/tests/unit/scheduler/test_host_manager.py | 12
-rw-r--r--  nova/tests/unit/scheduler/test_manager.py | 871
-rw-r--r--  nova/tests/unit/test_filesystem.py | 52
-rw-r--r--  nova/tests/unit/test_hacking.py | 21
-rw-r--r--  nova/tests/unit/test_policy.py | 10
-rw-r--r--  nova/tests/unit/test_rpc.py | 44
-rw-r--r--  nova/tests/unit/test_service.py | 9
-rw-r--r--  nova/tests/unit/virt/disk/test_api.py | 1
-rw-r--r--  nova/tests/unit/virt/ironic/test_driver.py | 70
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/__init__.py | 0
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/test_api.py | 194
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/test_core.py | 122
-rw-r--r--  nova/tests/unit/virt/libvirt/test_config.py | 58
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 282
-rw-r--r--  nova/tests/unit/virt/libvirt/test_host.py | 6
-rw-r--r--  nova/tests/unit/virt/libvirt/test_utils.py | 43
-rw-r--r--  nova/tests/unit/virt/test_hardware.py | 103
-rw-r--r--  nova/tests/unit/virt/test_images.py | 46
-rw-r--r--  nova/tests/unit/virt/test_netutils.py | 23
-rw-r--r--  nova/tests/unit/virt/test_node.py | 142
-rw-r--r--  nova/utils.py | 47
-rw-r--r--  nova/virt/driver.py | 6
-rw-r--r--  nova/virt/fake.py | 37
-rw-r--r--  nova/virt/hardware.py | 27
-rw-r--r--  nova/virt/images.py | 31
-rw-r--r--  nova/virt/ironic/driver.py | 45
-rw-r--r--  nova/virt/libvirt/config.py | 27
-rw-r--r--  nova/virt/libvirt/cpu/__init__.py | 0
-rw-r--r--  nova/virt/libvirt/cpu/api.py | 157
-rw-r--r--  nova/virt/libvirt/cpu/core.py | 78
-rw-r--r--  nova/virt/libvirt/driver.py | 198
-rw-r--r--  nova/virt/libvirt/host.py | 34
-rw-r--r--  nova/virt/libvirt/imagebackend.py | 1
-rw-r--r--  nova/virt/libvirt/utils.py | 97
-rw-r--r--  nova/virt/libvirt/volume/fibrechannel.py | 3
-rw-r--r--  nova/virt/netutils.py | 9
-rw-r--r--  nova/virt/node.py | 108
-rw-r--r--  playbooks/ceph/glance-copy-policy.yaml | 15
-rw-r--r--  playbooks/ceph/glance-setup.yaml | 39
-rw-r--r--  releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml | 5
-rw-r--r--  releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml | 23
-rw-r--r--  releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml | 13
-rw-r--r--  releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml | 51
-rw-r--r--  releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml | 18
-rw-r--r--  releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml | 8
-rw-r--r--  releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml | 28
-rw-r--r--  releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml | 23
-rw-r--r--  releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml | 11
-rw-r--r--  releasenotes/notes/microversion-2-94-59649401d5763286.yaml | 22
-rw-r--r--  releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml | 14
-rw-r--r--  releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml | 6
-rw-r--r--  releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml | 19
-rw-r--r--  releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml | 12
-rw-r--r--  releasenotes/source/2023.1.rst | 6
-rw-r--r--  releasenotes/source/index.rst | 1
-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po | 89
-rw-r--r--  requirements.txt | 6
-rw-r--r--  setup.cfg | 1
-rw-r--r--  tox.ini | 13
225 files changed, 10810 insertions, 1218 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index e8732a61af..8cad924e1f 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -248,8 +248,9 @@
tox_envlist: all
# Only run compute API tests.
tempest_test_regex: ^tempest\.api\.compute
- # Skip slow tests.
- tempest_exclude_regex: .*\[.*\bslow\b.*\]
+ # Skip slow tests. Also, skip some volume detach tests until bug#1998148
+ # is fixed.
+ tempest_exclude_regex: (^tempest\.(api\.compute\.(volumes\.test_attach_volume\.AttachVolumeTestJSON\.test_attach_detach_volume|servers\.(test_server_rescue\.ServerStableDeviceRescueTest\.test_stable_device_rescue_disk_virtio_with_volume_attached|test_server_rescue_negative\.ServerRescueNegativeTestJSON\.test_rescued_vm_detach_volume)))|.*\[.*\bslow\b.*\])
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
@@ -418,6 +419,7 @@
# Added in Yoga.
NOVNC_FROM_PACKAGE: False
NOVA_USE_UNIFIED_LIMITS: True
+ MYSQL_REDUCE_MEMORY: True
devstack_services:
# Disable OVN services
br-ex-tcpdump: false
@@ -594,8 +596,11 @@
required-projects:
- openstack/nova
pre-run:
- - playbooks/ceph/glance-copy-policy.yaml
+ - playbooks/ceph/glance-setup.yaml
vars:
+ # NOTE(danms): Increase our swap size since we're dealing with
+ # larger images that trigger OOMs.
+ configure_swap_size: 4096
# NOTE(danms): These tests create an empty non-raw image, which nova
# will refuse because we set never_download_image_if_on_rbd in this job.
# Just skip these tests for this case.
@@ -603,6 +608,8 @@
GLANCE_STANDALONE: True
GLANCE_USE_IMPORT_WORKFLOW: True
DEVSTACK_PARALLEL: True
+ GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 2048
+ MYSQL_REDUCE_MEMORY: True
# NOTE(danms): This job is pretty heavy as it is, so we disable some
# services that are not relevant to the nova-glance-ceph scenario
# that this job is intended to validate.
@@ -613,6 +620,12 @@
s-object: false
s-proxy: false
devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ image-feature-enabled:
+ manage_locations: true
+ volume:
+ volume_size: 1
post-config:
$NOVA_CONF:
libvirt:
@@ -621,7 +634,7 @@
never_download_image_if_on_rbd: True
$GLANCE_API_CONF:
DEFAULT:
- enabled_backends: "cheap:file, robust:rbd"
+ enabled_backends: "cheap:file, robust:rbd, web:http"
default_log_levels: "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, oslo_policy=DEBUG"
glance_store:
default_backend: cheap
@@ -633,6 +646,8 @@
rbd_store_ceph_conf: /etc/ceph/ceph.conf
cheap:
filesystem_store_datadir: /opt/stack/data/glance/images/
+ web:
+ https_insecure: false
os_glance_staging_store:
filesystem_store_datadir: /opt/stack/data/glance/os_glance_staging_store/
os_glance_tasks_store:
@@ -643,6 +658,45 @@
image_conversion:
output_format: raw
+# TODO(gmann): As per the 2023.1 testing runtime, we need to run at least
+# one job on Focal. This job can be removed as per the future testing
+# runtime (whenever we drop the Ubuntu Focal testing).
+- job:
+ name: tempest-integrated-compute-ubuntu-focal
+ description: This is the integrated compute job testing on Ubuntu Focal (20.04)
+ parent: tempest-integrated-compute
+ nodeset: openstack-single-node-focal
+
+# TODO(gmann): Remove this job once all the required services for the
+# integrated compute gate (Cinder, Glance, Neutron) enable scope and new
+# defaults by default, which means all the nova jobs will be tested with the
+# new RBAC in an integrated way and we will not need this separate job.
+- job:
+ name: tempest-integrated-compute-enforce-scope-new-defaults
+ parent: tempest-integrated-compute
+ description: |
+ This job runs the Tempest tests with scope and new defaults enabled
+ for Nova, Neutron, Glance, and Cinder services.
+ # TODO (gmann): There were a few fixes in neutron and neutron-lib for
+ # RBAC but they are not yet released, so we need to add both projects as
+ # the required-projects. Those can be removed once new versions of neutron
+ # and neutron-lib are released.
+ required-projects:
+ - openstack/neutron
+ - openstack/neutron-lib
+ vars:
+ devstack_localrc:
+ # Enabling the scope and new defaults for the services that implemented them.
+ # NOTE (gmann): We need to keep the keystone scope check disabled as
+ # services (except ironic) do not support the system scope and
+ # they need keystone to continue working with project scope. Until
+ # keystone policies are changed to also work with project scope, we
+ # need to keep the scope check disabled for keystone.
+ NOVA_ENFORCE_SCOPE: true
+ CINDER_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
- project:
# Please try to keep the list of job names sorted alphabetically.
templates:
@@ -679,10 +733,7 @@
voting: false
- nova-tox-functional-py38
- nova-tox-functional-py39
- - nova-tox-functional-py310:
- voting: true
- - openstack-tox-py310:
- voting: true
+ - nova-tox-functional-py310
- tempest-integrated-compute:
# NOTE(gmann): Policies changes do not need to run all the
# integration test jobs. Running only tempest and grenade
@@ -702,6 +753,10 @@
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
+ - tempest-integrated-compute-ubuntu-focal:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
- grenade-skip-level:
irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
@@ -735,6 +790,10 @@
- ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
- tempest-integrated-compute:
irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-ubuntu-focal:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
diff --git a/HACKING.rst b/HACKING.rst
index a2f67d993b..c5a1ba4ae3 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -76,6 +76,7 @@ Nova Specific Commandments
with eventlet patched code. Use nova.utils.ReaderWriterLock() instead.
- [N370] Don't use or import six
- [N371] You must explicitly import python's mock: ``from unittest import mock``
+- [N372] Don't use the setDaemon method. Use the daemon attribute instead.
Creating Unit Tests
-------------------
diff --git a/api-guide/source/accelerator-support.rst b/api-guide/source/accelerator-support.rst
index c71e899fd4..9d1b4d77b4 100644
--- a/api-guide/source/accelerator-support.rst
+++ b/api-guide/source/accelerator-support.rst
@@ -12,7 +12,7 @@ appropriate privileges) must do the following:
* Create a device profile in Cyborg, which specifies what accelerator
resources need to be provisioned. (See `Cyborg device profiles API`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
* Set the device profile name as an extra spec in a chosen flavor,
with this syntax:
@@ -102,7 +102,7 @@ appropriate privileges) must do the following:
resources need to be provisioned. (See `Cyborg device profiles API`_,
`Cyborg SRIOV Test Report`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
.. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic
* create a 'accelerator-direct' vnic type port with the device-profile name
diff --git a/api-guide/source/users.rst b/api-guide/source/users.rst
index a0b74374a2..28a59201c0 100644
--- a/api-guide/source/users.rst
+++ b/api-guide/source/users.rst
@@ -28,7 +28,7 @@ The Compute API uses these roles, along with oslo.policy, to decide
what the user is authorized to do.
Refer to the to
-:nova-doc:`compute admin guide </admin/arch#projects-users-and-roles>`
+:nova-doc:`compute admin guide </admin/architecture#projects-users-and-roles>`
for details.
Personas used in this guide
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index aba8185a4b..e185dce29d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -6382,6 +6382,9 @@ server_hostname_req:
description: |
The hostname to configure for the instance in the metadata service.
+ Starting with microversion 2.94, this can be a Fully Qualified Domain Name
+ (FQDN) of up to 255 characters in length.
+
.. note::
This information is published via the metadata service and requires
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index 3b8b68d4ff..bb9953afa0 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -604,6 +604,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json
:language: javascript
+**Example Rebuild Server (rebuild Action) (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-action-rebuild.json
+ :language: javascript
+
Response
--------
diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc
index 547a71e914..e72d0641b9 100644
--- a/api-ref/source/servers.inc
+++ b/api-ref/source/servers.inc
@@ -448,6 +448,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
:language: javascript
+**Example Create Server With FQDN in Hostname (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-create-req.json
+ :language: javascript
+
Response
--------
@@ -610,7 +615,7 @@ Response
.. rest_parameters:: parameters.yaml
- - server: server
+ - servers: servers
- accessIPv4: accessIPv4
- accessIPv6: accessIPv6
- addresses: addresses
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..8ad929226e
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
@@ -0,0 +1,4 @@
+{
+ "evacuate": {
+ }
+}
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
new file mode 100644
index 0000000000..d192892cdc
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "host": "testHost"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
new file mode 100644
index 0000000000..7eeb568ea4
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild.json b/doc/api_samples/servers/v2.94/server-action-rebuild.json
new file mode 100644
index 0000000000..b5401ad9ca
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild.json
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-req.json b/doc/api_samples/servers/v2.94/server-create-req.json
new file mode 100644
index 0000000000..c6d4ce5640
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-req.json
@@ -0,0 +1,30 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg=="
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-resp.json b/doc/api_samples/servers/v2.94/server-create-resp.json
new file mode 100644
index 0000000000..f50e29dd8b
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-get-resp.json b/doc/api_samples/servers/v2.94/server-get-resp.json
new file mode 100644
index 0000000000..0a05b2f917
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-get-resp.json
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-req.json b/doc/api_samples/servers/v2.94/server-update-req.json
new file mode 100644
index 0000000000..1743f05fc7
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname" : "new-server-hostname.example.com"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-resp.json b/doc/api_samples/servers/v2.94/server-update-resp.json
new file mode 100644
index 0000000000..4aa834f9ec
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-resp.json
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/servers-details-resp.json b/doc/api_samples/servers/v2.94/servers-details-resp.json
new file mode 100644
index 0000000000..54b63fa523
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-details-resp.json
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.94/servers-list-resp.json b/doc/api_samples/servers/v2.94/servers-list-resp.json
new file mode 100644
index 0000000000..742d54b170
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 78678556bf..3f285e6017 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.93",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 59b67279b7..749fd4674f 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.93",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/source/admin/architecture.rst b/doc/source/admin/architecture.rst
index 69130122f7..f5e2b90dd9 100644
--- a/doc/source/admin/architecture.rst
+++ b/doc/source/admin/architecture.rst
@@ -173,7 +173,7 @@ is possible to configure other filesystem types.
.. rubric:: Cinder-provisioned block storage
-The OpenStack Block Storage service, Cinder, provides persistent volumes hat
+The OpenStack Block Storage service, Cinder, provides persistent volumes that
are represented by a persistent virtualized block device independent of any
particular instance.
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index ffe1be06f9..aff8a0ab31 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -118,11 +118,47 @@ Implications for moving servers
There are several ways to move a server to another host: evacuate, resize,
cold migrate, live migrate, and unshelve. Move operations typically go through
-the scheduler to pick the target host *unless* a target host is specified and
-the request forces the server to that host by bypassing the scheduler. Only
-evacuate and live migrate can forcefully bypass the scheduler and move a
-server to a specified host and even then it is highly recommended to *not*
-force and bypass the scheduler.
+the scheduler to pick the target host.
+
+Prior to API microversion 2.68, using an older openstackclient (pre-5.5.0) or
+novaclient, it was possible to specify a target host and force the server to
+that host, bypassing the scheduler. Only evacuate and live migrate can
+forcefully bypass the scheduler and move a server to a specified host, and
+even then it is highly recommended *not* to force and bypass the scheduler.
+
+- live migrate with force host (works with older openstackclients (pre-5.5.0)):
+
+.. code-block:: console
+
+ $ openstack server migrate --live <host> <server>
+
+- live migrate without forcing:
+
+.. code-block:: console
+
+ $ openstack server migrate --live-migration --host <host> <server>
+
+Support for the 'server evacuate' command was added to openstackclient in
+5.5.3, but it never exposed the ability to force an evacuation; that was
+previously possible with novaclient.
+
+- evacuate with force host:
+
+.. code-block:: console
+
+ $ nova evacuate --force <server> <host>
+
+- evacuate without forcing using novaclient:
+
+.. code-block:: console
+
+ $ nova evacuate
+
+- evacuate without forcing using openstackclient:
+
+.. code-block:: console
+
+ $ openstack server evacuate --host <host> <server>
With respect to availability zones, a server is restricted to a zone if:
@@ -150,16 +186,6 @@ If the server was not created in a specific zone then it is free to be moved
to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
is a no-op.
-Knowing this, it is dangerous to force a server to another host with evacuate
-or live migrate if the server is restricted to a zone and is then forced to
-move to a host in another zone, because that will create an inconsistency in
-the internal tracking of where that server should live and may require manually
-updating the database for that server. For example, if a user creates a server
-in zone A and then the admin force live migrates the server to zone B, and then
-the user resizes the server, the scheduler will try to move it back to zone A
-which may or may not work, e.g. if the admin deleted or renamed zone A in the
-interim.
-
Resource affinity
~~~~~~~~~~~~~~~~~
diff --git a/doc/source/admin/compute-node-identification.rst b/doc/source/admin/compute-node-identification.rst
new file mode 100644
index 0000000000..31d4802d0b
--- /dev/null
+++ b/doc/source/admin/compute-node-identification.rst
@@ -0,0 +1,83 @@
+===========================
+Compute Node Identification
+===========================
+
+Nova requires that compute nodes maintain a constant and consistent identity
+during their lifecycle. With the exception of the ironic driver, starting in
+the 2023.1 release, this is achieved by use of a file containing the node
+unique identifier that is persisted on disk. Prior to 2023.1, a combination of
+the compute node's hostname and the :oslo.config:option:`host` value in the
+configuration file was used.
+
+From 2023.1 onward, the compute node identification file must remain unchanged
+during the lifecycle of the compute node. Changing the value or removing the
+file will result in a failure to start and may require advanced techniques
+for recovery. The file is read once at ``nova-compute`` startup, at which point
+it is validated for formatting and the corresponding node is located or
+created in the database.
+
+.. note::
+
+ Even after 2023.1, the compute node's hostname may not be changed after
+ the initial registration with the controller nodes; it is just no longer
+ used as the primary method of identification.
+
+The behavior of ``nova-compute`` is different when using the ironic driver,
+as the (UUID-based) identity and mapping of compute nodes to compute manager
+service hosts is dynamic. In that case, no single node identity is maintained
+by the compute host and thus no identity file is read or written. As a result,
+none of the sections below apply to hosts with
+:oslo.config:option:`compute_driver` set to ``ironic``.
+
+Self-provisioning of the node identity
+--------------------------------------
+
+By default, ``nova-compute`` will automatically generate and write a UUID to
+disk the first time it starts up, and will use that going forward as its
+stable identity. Using the :oslo.config:option:`state_path`
+(which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be
+created with a generated UUID.
+
+Since this file (and its parent directory) is writable by nova, it may be
+desirable to move this to one of the other locations in which nova looks for the
+identification file.
+
+Deployment provisioning of the node identity
+--------------------------------------------
+
+In addition to the location mentioned above, nova will also search the parent
+directories of any config file in use (either the defaults or provided on
+the command line) for a ``compute_id`` file. Thus, a deployment tool may, on
+most systems, pre-provision the node's UUID by writing one to
+``/etc/nova/compute_id``.
+
+The contents of the file should be a single UUID in canonical textual
+representation with no additional whitespace or other characters. The following
+should work on most Linux systems:
+
+.. code-block:: shell
+
+ $ uuidgen > /etc/nova/compute_id
+
+.. note::
+
+ **Do not** execute the above command blindly in every run of a deployment
+ tool, as that will result in overwriting the ``compute_id`` file each time,
+ which *will* prevent nova from working properly.
+
+Upgrading from pre-2023.1
+-------------------------
+
+Before release 2023.1, ``nova-compute`` only used the hostname (combined with
+:oslo.config:option:`host`, if set) to identify its compute node objects in
+the database. When upgrading from a prior release, the compute node will
+perform a one-time migration of the hostname-matched compute node UUID to the
+``compute_id`` file in the :oslo.config:option:`state_path` location.
+
+.. note::
+
+ It is imperative that you allow the above migration to run and complete on
+ compute nodes that are being upgraded. Skipping this step by
+ pre-provisioning a ``compute_id`` file before the upgrade will **not** work
+ and will be equivalent to changing the compute node UUID after it has
+ already been created once.
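[Editorial illustration, not part of the patch above: the new document describes pre-provisioning the node identity and the canonical-UUID requirement. The shell sketch below shows how a deployment tool could do this without ever overwriting an existing ``compute_id`` file; the paths assume the defaults described above (``/etc/nova`` for configuration and ``/var/lib/nova`` as the state path) and may differ in your deployment.]

.. code-block:: shell

    # Pre-provision an identity only if neither the config-dir nor the
    # state-dir copy already exists; never overwrite an existing identity.
    if [ ! -s /etc/nova/compute_id ] && [ ! -s /var/lib/nova/compute_id ]; then
        uuidgen > /etc/nova/compute_id
    fi

    # Sanity check: the file must contain a single canonical UUID.
    grep -Eq '^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$' /etc/nova/compute_id \
        || echo "compute_id is not a canonical UUID" >&2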
diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst
index 9770639c3a..082c88f655 100644
--- a/doc/source/admin/cpu-topologies.rst
+++ b/doc/source/admin/cpu-topologies.rst
@@ -730,6 +730,97 @@ CPU policy, meanwhile, will consume ``VCPU`` inventory.
.. _configure-hyperv-numa:
+Configuring CPU power management for dedicated cores
+----------------------------------------------------
+
+.. versionchanged:: 27.0.0
+
+ This feature was only introduced by the 2023.1 Antelope release
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt/KVM driver.
+
+For power saving reasons, operators can decide to turn down the power usage of
+CPU cores depending on whether they are in use or not. For obvious reasons,
+Nova only allows changing the power consumption of a dedicated CPU core and not
+a shared one. Accordingly, usage of this feature relies on reading the
+:oslo.config:option:`compute.cpu_dedicated_set` config option to know which CPU
+cores to handle.
+To enable power management of dedicated cores, set the
+:oslo.config:option:`libvirt.cpu_power_management` config option to ``True``.
+
+By default, if this option is enabled, Nova will look up the dedicated cores
+and power them down at compute service startup. Then, once an instance is
+started and pinned to a dedicated core, that core will be powered up right
+before the libvirt guest starts. Conversely, once an instance is stopped,
+migrated or deleted, the corresponding dedicated core will be powered down.
+
+There are two distinct strategies for powering cores up or down:
+
+- the default is to offline the CPU core and online it when needed.
+- an alternative strategy is to use two distinct CPU governors for the up state
+ and the down state.
+
+The strategy can be chosen using the
+:oslo.config:option:`libvirt.cpu_power_management_strategy` config option.
+``cpu_state`` selects the online/offline strategy, while ``governor`` selects
+the alternative strategy.
+We default to turning off the cores as it provides the best power savings,
+while there could be other tools outside Nova, like tuned, to manage the
+governor. That being said, we also provide a way to automatically change the
+governors on the fly, as explained below.
+
+If the strategy is set to ``governor``, a couple of config options are provided
+to define which exact CPU governor to use for each of the up and down states:
+
+- :oslo.config:option:`libvirt.cpu_power_governor_low` will define the governor
+ to use for the powerdown state (defaults to ``powersave``)
+- :oslo.config:option:`libvirt.cpu_power_governor_high` will define the
+ governor to use for the powerup state (defaults to ``performance``)
+
+.. important::
+ It is the responsibility of the operator to ensure that the governors
+ defined by the configuration options are supported by the underlying OS
+ kernel that runs the compute service.
+
+ As a side note, we recommend the ``schedutil`` governor as an alternative for
+ the high-power state (if the kernel supports it) as the CPU frequency is
+ dynamically set based on CPU task states. Other governors may be worth
+ testing, including ``conservative`` and ``ondemand``, which consume quite a
+ bit more power than ``schedutil`` but are more efficient than
+ ``performance``. See `Linux kernel docs`_ for further explanations.
+
+.. _`Linux kernel docs`: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+As an example, the relevant part of a ``nova.conf`` configuration would look like::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=cpu_state
+
+.. warning::
+
+ CPU core #0 has a special meaning in most recent Linux kernels.
+ It is always highly discouraged to use it for CPU pinning, and please
+ refrain from having it power managed, or you could see surprises if Nova
+ turns it off!
+
+One last important note: you can decide to change the CPU management strategy
+during the compute lifecycle, or you may already be managing the CPU states
+yourself. To ensure that Nova can correctly manage CPU performance, we added a
+couple of checks at startup that refuse to start the nova-compute service if
+these rules aren't met:
+
+- if the operator opts for the ``cpu_state`` strategy, then all dedicated CPU
+ governors *MUST* be identical.
+- if they decide to use ``governor``, then all dedicated CPU cores *MUST* be
+ online.
+
Configuring Hyper-V compute nodes for instance NUMA policies
------------------------------------------------------------
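[Editorial illustration, not part of the patch above: the two power management strategies described in the new section correspond to standard Linux sysfs interfaces. The commands below only show what powering an example core (#5) down and up looks like under each strategy; the governor names are the defaults mentioned above, and none of this should be run by hand against cores that a live nova-compute service is managing.]

.. code-block:: console

    # cpu_state strategy: offline / online the core
    # echo 0 > /sys/devices/system/cpu/cpu5/online
    # echo 1 > /sys/devices/system/cpu/cpu5/online

    # governor strategy: switch the cpufreq scaling governor
    # echo powersave > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
    # echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor

    # inspect the current state and governor of the core
    $ cat /sys/devices/system/cpu/cpu5/online
    $ cat /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor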
diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst
index 73f6c5dd2d..a451c6e3ab 100644
--- a/doc/source/admin/huge-pages.rst
+++ b/doc/source/admin/huge-pages.rst
@@ -96,7 +96,7 @@ pages at boot time, run:
.. code-block:: console
- # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' > /etc/default/grub
+ # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub
$ grep GRUB_CMDLINE_LINUX /etc/default/grub
GRUB_CMDLINE_LINUX="..."
GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 287e9d8fb5..8cb5bf7156 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -206,6 +206,7 @@ instance for these kind of workloads.
secure-boot
sev
managing-resource-providers
+ compute-node-identification
resource-limits
cpu-models
libvirt-misc
@@ -230,3 +231,4 @@ Once you are running nova, the following information is extremely useful.
node-down
hw-machine-type
hw-emulation-architecture
+ soft-delete-shadow-tables
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..32c67c2b0a 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -102,7 +102,7 @@ Manual selection of the destination host
.. code-block:: console
- $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live HostC
+ $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live-migration --host HostC
#. Confirm that the instance has been migrated successfully:
diff --git a/doc/source/admin/manage-logs.rst b/doc/source/admin/manage-logs.rst
index f60a523852..3a1546d8f4 100644
--- a/doc/source/admin/manage-logs.rst
+++ b/doc/source/admin/manage-logs.rst
@@ -181,12 +181,18 @@ websocket client to access the serial console.
.. rubric:: Accessing the serial console on an instance
-#. Use the :command:`nova get-serial-proxy` command to retrieve the websocket
+#. Use the :command:`nova get-serial-console` command to retrieve the websocket
URL for the serial console on the instance:
.. code-block:: console
- $ nova get-serial-proxy INSTANCE_NAME
+ $ nova get-serial-console INSTANCE_NAME
+
+ Or use the :command:`openstack console url show` command.
+
+ .. code-block:: console
+
+ $ openstack console url show --serial INSTANCE_NAME
.. list-table::
:header-rows: 0
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index de79792f16..09a963603d 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -65,6 +65,10 @@ capabilities.
:oslo.config:option:`pci.device_spec` configuration that uses the
``devname`` field.
+.. versionchanged:: 27.0.0 (2023.1 Antelope):
+ Nova provides Placement based scheduling support for servers with flavor
+ based PCI requests. This support is disabled by default.
+
Enabling PCI passthrough
------------------------
@@ -442,6 +446,39 @@ removed and VFs from the same PF is configured (or vice versa) then
nova-compute will refuse to start as it would create a situation where both
the PF and its VFs are made available for consumption.
+Since nova 27.0.0 (2023.1 Antelope) scheduling and allocation of PCI devices
+in Placement can also be enabled via
+:oslo.config:option:`filter_scheduler.pci_in_placement`. Please note that this
+should only be enabled after all the computes in the system are configured to
+report PCI inventory in Placement by
+enabling :oslo.config:option:`pci.report_in_placement`. In Antelope, flavor
+based PCI requests are supported but Neutron port based PCI requests are not
+handled in Placement.
+
+If you are upgrading from an earlier version with already existing servers with
+PCI usage then you must enable :oslo.config:option:`pci.report_in_placement`
+first on all your computes having PCI allocations and then restart the
+nova-compute service, before you enable
+:oslo.config:option:`filter_scheduler.pci_in_placement`. The compute service
+will heal the missing PCI allocation in placement during startup and will
+continue healing missing allocations for future servers until the scheduling
+support is enabled.
+
+If a flavor requests multiple ``type-VF`` devices via
+:nova:extra-spec:`pci_passthrough:alias` then it is important to consider the
+value of :nova:extra-spec:`group_policy` as well. The value ``none``
+allows nova to select VFs from the same parent PF to fulfill the request. The
+value ``isolate`` restricts nova to selecting each VF from a different parent
+PF to fulfill the request. If :nova:extra-spec:`group_policy` is not provided
+in such a flavor then it defaults to ``none``.
+
+Symmetrically with the ``resource_class`` and ``traits`` fields of
+:oslo.config:option:`pci.device_spec` the :oslo.config:option:`pci.alias`
+configuration option supports requesting devices by Placement resource class
+name via the ``resource_class`` field and also supports requesting traits to
+be present on the selected devices via the ``traits`` field in the alias. If
+the ``resource_class`` field is not specified in the alias then it is defaulted
+by nova to ``CUSTOM_PCI_<vendor_id>_<product_id>``.
For deeper technical details please read the `nova specification. <https://specs.openstack.org/openstack/nova-specs/specs/zed/approved/pci-device-tracking-in-placement.html>`_
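[Editorial illustration, not part of the patch above: to make the relationship between ``pci.device_spec``, ``pci.alias`` and the Placement options concrete, the sketch below shows a minimal ``nova.conf`` fragment where a device is reported under a custom resource class and trait and then requested by those instead of vendor/product ID. The vendor/product IDs, the resource class name and the trait name are placeholders; check the configuration reference for your release before reusing them.]

.. code-block:: ini

    [pci]
    # report PCI inventory to Placement (prerequisite for pci_in_placement)
    report_in_placement = True
    device_spec = { "vendor_id": "10de", "product_id": "1eb8", "resource_class": "CUSTOM_MY_GPU", "traits": "CUSTOM_FAST" }
    # request devices by resource class and trait rather than vendor/product ID
    alias = { "name": "my-gpu", "resource_class": "CUSTOM_MY_GPU", "traits": "CUSTOM_FAST" }

    [filter_scheduler]
    pci_in_placement = True

A flavor would then request one such device with the usual ``pci_passthrough:alias="my-gpu:1"`` extra spec.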
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index 01ef44810c..9b28646d27 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -221,6 +221,9 @@ server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings:
service, this ensures that only approved VNC proxy servers can connect to the
Compute nodes.
+Make sure to provide correct permissions on the certificate files for the
+process which creates instances. Please follow the libvirt wiki page [3]_ for
+details.
+
After editing :file:`qemu.conf`, the ``libvirtd`` service must be restarted:
.. code-block:: shell
@@ -363,6 +366,16 @@ Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
by the outside world. For example, this may be the management interface IP
address of the controller or the VIP.
+Optionally, the :program:`nova-compute` service supports the following
+additional options to configure compression settings (algorithms and modes)
+for SPICE consoles.
+
+- :oslo.config:option:`spice.image_compression`
+- :oslo.config:option:`spice.jpeg_compression`
+- :oslo.config:option:`spice.zlib_compression`
+- :oslo.config:option:`spice.playback_compression`
+- :oslo.config:option:`spice.streaming_mode`
+
Serial
------
@@ -610,5 +623,6 @@ Frequently Asked Questions
References
----------
-.. [1] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
+.. [1] https://qemu.weilnetz.de/doc/4.2/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
.. [2] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
+.. [3] https://wiki.libvirt.org/page/VNCTLSSetup#Changes_to_be_made_on_the_virtualisation_host_server \ No newline at end of file
diff --git a/doc/source/admin/soft-delete-shadow-tables.rst b/doc/source/admin/soft-delete-shadow-tables.rst
new file mode 100644
index 0000000000..126279c4d0
--- /dev/null
+++ b/doc/source/admin/soft-delete-shadow-tables.rst
@@ -0,0 +1,62 @@
+=============================
+Soft Delete and Shadow Tables
+=============================
+
+Nova has two unrelated features which are called ``soft delete``:
+
+Soft delete instances that can be restored
+------------------------------------------
+
+After an instance delete request, the actual delete is
+delayed by a configurable amount of time (config option
+:oslo.config:option:`reclaim_instance_interval`). During the delay,
+the instance is put into the ``SOFT_DELETED`` state and can be
+restored (:command:`openstack server restore`) by an admin in order to
+gracefully handle human mistakes. If the instance is not restored during
+the configured delay, a periodic job actually deletes the instance.
+
+This feature is optional and disabled by default.
+
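+A minimal sketch of how this is typically used (the one hour interval is only
+an example):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # keep soft-deleted instances around for one hour before reclaiming them
+   reclaim_instance_interval = 3600
+
+During that window an admin can bring a deleted instance back with
+:command:`openstack server restore <server>`.
+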
+See also:
+
+- "Delete, Restore" in `API Guide: Server Concepts
+ <https://docs.openstack.org/api-guide/compute/server_concepts.html#server-actions>`_
+- config reference: :oslo.config:option:`reclaim_instance_interval`
+
+Soft delete database rows to shadow tables
+------------------------------------------
+
+When an instance is actually deleted, no DB record is removed. Instead
+the records are marked as deleted (for example ``instances.deleted``
+in Nova cell databases). This preserves historical information
+for debugging and audit uses. However, it also leads to an accumulation
+of data in Nova cell DB tables, which may have an effect on
+Nova DB performance as documented in `DB prune deleted rows
+<https://docs.openstack.org/nova/latest/admin/upgrades.html#concepts>`_.
+
+The records marked as deleted can be cleaned up in multiple stages.
+First you can move them to so-called shadow tables (tables with the prefix
+``shadow_`` in Nova cell databases). This is called *archiving the
+deleted rows*. Nova does not query shadow tables, so data moved
+to the shadow tables no longer affects DB performance. However, storage
+from the shadow tables. This is called *DB purge*.
+
+These operations can be performed by nova-manage:
+
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-archive-deleted-rows
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-purge
+
+This feature is not optional. Every long-running deployment should
+regularly archive and purge the deleted rows, for example via a cron
+job that regularly calls :program:`nova-manage db archive_deleted_rows` and
+:program:`nova-manage db purge`. The tradeoffs between data retention,
+DB performance and storage needs should be considered.
+
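+A hedged sketch of such a periodic job follows; the retention period and the
+exact flags should be adapted to your deployment and release:
+
+.. code-block:: shell
+
+   # move rows deleted more than 90 days ago into the shadow tables
+   nova-manage db archive_deleted_rows \
+       --before "$(date -d '-90 days' '+%F')" --until-complete --all-cells
+   # then remove them from the shadow tables as well
+   nova-manage db purge --before "$(date -d '-90 days' '+%F')" --all-cells
+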
+In the Mitaka release there was an agreement between Nova developers that
+it's not desirable to provide shadow tables for every table in the Nova
+database, `documented in a spec
+<https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/no-more-soft-delete.html>`_.
+
+Therefore, not all information about an instance is preserved in the shadow
+tables, and no new shadow tables have been introduced since then.
diff --git a/doc/source/admin/upgrades.rst b/doc/source/admin/upgrades.rst
index 00a714970b..61fd0cf258 100644
--- a/doc/source/admin/upgrades.rst
+++ b/doc/source/admin/upgrades.rst
@@ -41,21 +41,27 @@ Rolling upgrade process
To reduce downtime, the compute services can be upgraded in a rolling fashion.
It means upgrading a few services at a time. This results in a condition where
both old (N) and new (N+1) nova-compute services co-exist for a certain time
-period. Note that, there is no upgrade of the hypervisor here, this is just
+period (or even N with N+2 upgraded nova-compute services, see below).
+Note that there is no upgrade of the hypervisor here; this is just
upgrading the nova services. If reduced downtime is not a concern (or lower
complexity is desired), all services may be taken down and restarted at the
same time.
.. important::
- Nova does not currently support the coexistence of N and N+2 or greater
- :program:`nova-compute` or :program:`nova-conductor` services in the same
- deployment. The `nova-conductor`` service will fail to start when a
- ``nova-compute`` service that is older than the previous release (N-2 or
- greater) is detected. Similarly, in a :doc:`deployment with multiple cells
+ As of OpenStack 2023.1 (Antelope), Nova supports the coexistence of N and
+ N-2 (Yoga) :program:`nova-compute` or :program:`nova-conductor` services in
+   the same deployment. The ``nova-conductor`` service will fail to start when
+   a ``nova-compute`` service that is older than the support envelope is
+   detected. This varies by release, and the support envelope will be explained
+ in the release notes. Similarly, in a :doc:`deployment with multiple cells
</admin/cells>`, neither the super conductor service nor any per-cell
conductor service will start if any other conductor service in the
- deployment is older than the previous release.
+ deployment is older than the N-2 release.
+
+ Releases older than 2023.1 will only support rolling upgrades for a single
+ release difference between :program:`nova-compute` and
+ :program:`nova-conductor` services.
#. Before maintenance window:
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f190949efa..1346dab92e 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -41,6 +41,8 @@ Files
* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
+* ``/etc/nova/compute_id``
+* ``/var/lib/nova/compute_id``
See Also
========
diff --git a/doc/source/contributor/how-to-get-involved.rst b/doc/source/contributor/how-to-get-involved.rst
index fd90138354..28e75564b0 100644
--- a/doc/source/contributor/how-to-get-involved.rst
+++ b/doc/source/contributor/how-to-get-involved.rst
@@ -261,7 +261,7 @@ reviews:
- Where do I start? What should I review?
- There are various tools, but a good place to start is:
- https://etherpad.openstack.org/p/nova-runways-zed
+ https://review.opendev.org/q/project:openstack/nova+status:open+label:Review-Priority%253DANY
- Depending on the time in the cycle, it's worth looking at
NeedsCodeReview blueprints:
https://blueprints.launchpad.net/nova/
@@ -323,7 +323,7 @@ becoming a member of nova-core.
How to do great nova-spec reviews?
==================================
-https://specs.openstack.org/openstack/nova-specs/specs/zed/template.html
+https://specs.openstack.org/openstack/nova-specs/specs/2023.1/template.html
:doc:`/contributor/blueprints`.
diff --git a/doc/source/contributor/process.rst b/doc/source/contributor/process.rst
index 1cbb9a0c72..f1be1c1b4a 100644
--- a/doc/source/contributor/process.rst
+++ b/doc/source/contributor/process.rst
@@ -36,8 +36,8 @@ If you are new to Nova, please read this first: :ref:`getting_involved`.
Dates overview
==============
-For Zed, please see:
-https://wiki.openstack.org/wiki/Nova/Zed_Release_Schedule
+For 2023.1 Antelope, please see:
+https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule
.. note:: Throughout this document any link which references the name of a
release cycle in the link can usually be changed to the name of the
@@ -102,9 +102,9 @@ Why we have a Spec Freeze:
By the freeze date, we expect all blueprints that will be approved for the
cycle to be listed on launchpad and all relevant specs to be merged.
-For Zed, blueprints can be found at
-https://blueprints.launchpad.net/nova/zed and specs at
-https://specs.openstack.org/openstack/nova-specs/specs/zed/index.html
+For 2023.1 Antelope, blueprints can be found at
+https://blueprints.launchpad.net/nova/antelope and specs at
+https://specs.openstack.org/openstack/nova-specs/specs/2023.1/index.html
Starting with Liberty, we are keeping a backlog open for submission at all
times.
diff --git a/doc/source/user/wsgi.rst b/doc/source/user/wsgi.rst
index 6b314b4832..63f949df1a 100644
--- a/doc/source/user/wsgi.rst
+++ b/doc/source/user/wsgi.rst
@@ -8,10 +8,16 @@ as Apache_ or nginx_).
The nova project provides two automatically generated entry points that
support this: ``nova-api-wsgi`` and ``nova-metadata-wsgi``. These read
-``nova.conf`` and ``api-paste.ini`` and generate the required module-level
-``application`` that most WSGI servers require. If nova is installed using pip,
-these two scripts will be installed into whatever the expected ``bin``
-directory is for the environment.
+``nova.conf`` and ``api-paste.ini`` by default and generate the required
+module-level ``application`` that most WSGI servers require.
+If nova is installed using pip, these two scripts will be installed into
+whatever the expected ``bin`` directory is for the environment.
+
+The config files and config directory can be overridden via the
+``OS_NOVA_CONFIG_FILES`` and ``OS_NOVA_CONFIG_DIR`` environment variables.
+File paths listed in ``OS_NOVA_CONFIG_FILES`` are relative to
+``OS_NOVA_CONFIG_DIR`` and delimited by ``;``.
+
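+For example, to point the WSGI application at a non-default configuration
+layout (the paths and the uWSGI invocation below are illustrative only):
+
+.. code-block:: shell
+
+   export OS_NOVA_CONFIG_DIR=/opt/nova/etc
+   # both files are resolved relative to OS_NOVA_CONFIG_DIR
+   export OS_NOVA_CONFIG_FILES="nova.conf;api.conf"
+   uwsgi --http :8774 --wsgi-file "$(command -v nova-api-wsgi)" --processes 2
+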
The new scripts replace older experimental scripts that could be found in the
``nova/wsgi`` directory of the code repository. The new scripts are *not*
diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf
index 742f348f11..8b7fd3bec8 100644
--- a/etc/nova/nova-config-generator.conf
+++ b/etc/nova/nova-config-generator.conf
@@ -16,3 +16,4 @@ namespace = oslo.concurrency
namespace = oslo.reports
namespace = keystonemiddleware.auth_token
namespace = osprofiler
+namespace = os_vif
diff --git a/mypy-files.txt b/mypy-files.txt
index 5a3b9ab339..391ed58d87 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -1,6 +1,7 @@
nova/compute/manager.py
nova/compute/pci_placement_translator.py
nova/crypto.py
+nova/filesystem.py
nova/limit/local.py
nova/limit/placement.py
nova/network/neutron.py
@@ -13,6 +14,9 @@ nova/virt/driver.py
nova/virt/hardware.py
nova/virt/libvirt/machine_type_utils.py
nova/virt/libvirt/__init__.py
+nova/virt/libvirt/cpu/__init__.py
+nova/virt/libvirt/cpu/api.py
+nova/virt/libvirt/cpu/core.py
nova/virt/libvirt/driver.py
nova/virt/libvirt/event.py
nova/virt/libvirt/guest.py
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index 84d8872f9e..718ac7e8e6 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -235,7 +235,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /flavors/{flavor_id}/os-extra_specs`` and
``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs.
* 2.87 - Adds support for rescuing boot from volume instances when the
- compute host reports the COMPUTE_BFV_RESCUE capability trait.
+ compute host reports the COMPUTE_RESCUE_BFV capability trait.
* 2.88 - Drop statistics-style fields from the ``/os-hypervisors/detail``
and ``/os-hypervisors/{hypervisor_id}`` APIs, and remove the
``/os-hypervisors/statistics`` and
@@ -253,6 +253,8 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /os-keypairs`` and allow including @ and dot (.) characters
in keypair name.
* 2.93 - Add support for volume backed server rebuild.
+ * 2.94 - Allow FQDN in server hostname.
+ * 2.95 - Evacuate will now stop instance at destination.
"""
# The minimum and maximum versions of the API supported
@@ -261,7 +263,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.93'
+_MAX_API_VERSION = '2.95'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/compute/evacuate.py b/nova/api/openstack/compute/evacuate.py
index aa35812759..a6602be079 100644
--- a/nova/api/openstack/compute/evacuate.py
+++ b/nova/api/openstack/compute/evacuate.py
@@ -23,9 +23,11 @@ from nova.api.openstack.compute.schemas import evacuate
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
+from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
+from nova import objects
from nova.policies import evacuate as evac_policies
from nova import utils
@@ -33,6 +35,8 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED = 62
+
class EvacuateController(wsgi.Controller):
def __init__(self):
@@ -77,7 +81,8 @@ class EvacuateController(wsgi.Controller):
@validation.schema(evacuate.evacuate, "2.0", "2.13")
@validation.schema(evacuate.evacuate_v214, "2.14", "2.28")
@validation.schema(evacuate.evacuate_v2_29, "2.29", "2.67")
- @validation.schema(evacuate.evacuate_v2_68, "2.68")
+ @validation.schema(evacuate.evacuate_v2_68, "2.68", "2.94")
+ @validation.schema(evacuate.evacuate_v2_95, "2.95")
def _evacuate(self, req, id, body):
"""Permit admins to evacuate a server from a failed host
to a new one.
@@ -92,6 +97,19 @@ class EvacuateController(wsgi.Controller):
host = evacuate_body.get("host")
force = None
+ target_state = None
+ if api_version_request.is_supported(req, min_version='2.95'):
+ min_ver = objects.service.get_minimum_version_all_cells(
+ context, ['nova-compute'])
+ if min_ver < MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED:
+ raise exception.NotSupportedComputeForEvacuateV295(
+ {'currently': min_ver,
+ 'expected': MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED})
+            # Starting with microversion 2.95, any evacuated instance will be
+            # stopped at the destination. Previously an active or stopped
+            # instance would have kept its state.
+ target_state = vm_states.STOPPED
+
on_shared_storage = self._get_on_shared_storage(req, evacuate_body)
if api_version_request.is_supported(req, min_version='2.29'):
@@ -120,7 +138,8 @@ class EvacuateController(wsgi.Controller):
try:
self.compute_api.evacuate(context, instance, host,
- on_shared_storage, password, force)
+ on_shared_storage, password, force,
+ target_state)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate', id)
@@ -130,6 +149,8 @@ class EvacuateController(wsgi.Controller):
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
+ except exception.UnsupportedRPCVersion as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
if (not api_version_request.is_supported(req, min_version='2.14') and
CONF.api.enable_instance_password):
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index e17e6f0ddc..fc8df15db5 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -93,7 +93,14 @@ class FlavorActionController(wsgi.Controller):
vals = body['removeTenantAccess']
tenant = vals['tenant']
- identity.verify_project_id(context, tenant)
+        # It doesn't really matter whether the project exists or not: we can
+        # delete it from the flavor's access list in either case.
+ try:
+ identity.verify_project_id(context, tenant)
+ except webob.exc.HTTPBadRequest as identity_exc:
+ msg = "Project ID %s is not a valid project." % tenant
+ if msg not in identity_exc.explanation:
+ raise
# NOTE(gibi): We have to load a flavor from the db here as
# flavor.remove_access() will try to emit a notification and that needs
diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py
index 36015542aa..7d374ef432 100644
--- a/nova/api/openstack/compute/remote_consoles.py
+++ b/nova/api/openstack/compute/remote_consoles.py
@@ -56,6 +56,9 @@ class RemoteConsolesController(wsgi.Controller):
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as e:
+ common.raise_http_conflict_for_instance_invalid_state(
+ e, 'get_vnc_console', id)
except NotImplementedError:
common.raise_feature_not_supported()
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index 642aefab49..c7a2777d3a 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1141,7 +1141,7 @@ Validation is only used for recognized extra spec namespaces, currently:
-------------------------------------
Adds support for rescuing boot from volume instances when the compute host
-reports the ``COMPUTE_BFV_RESCUE`` capability trait.
+reports the ``COMPUTE_RESCUE_BFV`` capability trait.
.. _microversion 2.88:
@@ -1229,3 +1229,21 @@ Add support for volume backed server rebuild. The end user will provide the
image with the rebuild command and it will rebuild the volume with the new
image similar to the result of rebuilding an ephemeral disk.
+
+2.94
+----
+
+The ``hostname`` parameter to the ``POST /servers`` (create server), ``PUT
+/servers/{id}`` (update server) and ``POST /servers/{server_id}/action
+(rebuild)`` (rebuild server) APIs is now allowed to be a Fully Qualified Domain
+Name (FQDN).
+
+.. _microversion 2.95:
+
+2.95 (Maximum in 2023.1 Antelope)
+---------------------------------
+
+Any evacuated instances will now be stopped at the destination. This
+requires a minimum nova release of 27.0.0, OpenStack release 2023.1
+Antelope. Operators can still use a previous microversion to get the
+older behavior.
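+
+As a sketch, an evacuation request pinned to this microversion could look like
+the following; ``$TOKEN``, ``$COMPUTE_ENDPOINT``, the server UUID and the
+target host are placeholders:
+
+.. code-block:: shell
+
+   curl -X POST "$COMPUTE_ENDPOINT/servers/<server_uuid>/action" \
+       -H "X-Auth-Token: $TOKEN" \
+       -H "Content-Type: application/json" \
+       -H "OpenStack-API-Version: compute 2.95" \
+       -d '{"evacuate": {"host": "<target-host>"}}'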
diff --git a/nova/api/openstack/compute/schemas/evacuate.py b/nova/api/openstack/compute/schemas/evacuate.py
index a415a97f89..c7b84a655e 100644
--- a/nova/api/openstack/compute/schemas/evacuate.py
+++ b/nova/api/openstack/compute/schemas/evacuate.py
@@ -46,3 +46,7 @@ evacuate_v2_29['properties']['evacuate']['properties'][
# v2.68 removes the 'force' parameter added in v2.29, meaning it is identical
# to v2.14
evacuate_v2_68 = copy.deepcopy(evacuate_v214)
+
+# v2.95 keeps the same schema; evacuating an instance will now result in its
+# state being stopped at the destination.
+evacuate_v2_95 = copy.deepcopy(evacuate_v2_68)
diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py
index 300411de40..0869f83434 100644
--- a/nova/api/openstack/compute/schemas/servers.py
+++ b/nova/api/openstack/compute/schemas/servers.py
@@ -360,6 +360,11 @@ create_v290 = copy.deepcopy(create_v274)
create_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+# Support FQDN as hostname
+create_v294 = copy.deepcopy(create_v290)
+create_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
update = {
'type': 'object',
'properties': {
@@ -391,6 +396,11 @@ update_v290 = copy.deepcopy(update_v219)
update_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+
+update_v294 = copy.deepcopy(update_v290)
+update_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
rebuild = {
'type': 'object',
'properties': {
@@ -449,6 +459,10 @@ rebuild_v290 = copy.deepcopy(rebuild_v263)
rebuild_v290['properties']['rebuild']['properties'][
'hostname'] = parameter_types.hostname
+rebuild_v294 = copy.deepcopy(rebuild_v290)
+rebuild_v294['properties']['rebuild']['properties'][
+ 'hostname'] = parameter_types.fqdn
+
resize = {
'type': 'object',
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 6a9bf1fa92..33e74456fd 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -677,7 +677,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.create_v263, '2.63', '2.66')
@validation.schema(schema_servers.create_v267, '2.67', '2.73')
@validation.schema(schema_servers.create_v274, '2.74', '2.89')
- @validation.schema(schema_servers.create_v290, '2.90')
+ @validation.schema(schema_servers.create_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.create_v294, '2.94')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
@@ -906,7 +907,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.update_v20, '2.0', '2.0')
@validation.schema(schema_servers.update, '2.1', '2.18')
@validation.schema(schema_servers.update_v219, '2.19', '2.89')
- @validation.schema(schema_servers.update_v290, '2.90')
+ @validation.schema(schema_servers.update_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.update_v294, '2.94')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
@@ -1147,7 +1149,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.rebuild_v254, '2.54', '2.56')
@validation.schema(schema_servers.rebuild_v257, '2.57', '2.62')
@validation.schema(schema_servers.rebuild_v263, '2.63', '2.89')
- @validation.schema(schema_servers.rebuild_v290, '2.90')
+ @validation.schema(schema_servers.rebuild_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.rebuild_v294, '2.94')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
@@ -1353,6 +1356,8 @@ class ServersController(wsgi.Controller):
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
+ except exception.InstanceQuiesceFailed as err:
+ raise exc.HTTPConflict(explanation=err.format_message())
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
except exception.OverQuota as e:
diff --git a/nova/api/openstack/identity.py b/nova/api/openstack/identity.py
index 7ffc623fed..15ec884aea 100644
--- a/nova/api/openstack/identity.py
+++ b/nova/api/openstack/identity.py
@@ -27,24 +27,27 @@ def verify_project_id(context, project_id):
"""verify that a project_id exists.
This attempts to verify that a project id exists. If it does not,
- an HTTPBadRequest is emitted.
+    an HTTPBadRequest is emitted. An HTTPBadRequest is also emitted
+    if the Keystone identity service version 3.0 is not found.
"""
adap = utils.get_ksa_adapter(
'identity', ksa_auth=context.get_auth_plugin(),
min_version=(3, 0), max_version=(3, 'latest'))
- failure = webob.exc.HTTPBadRequest(
- explanation=_("Project ID %s is not a valid project.") %
- project_id)
try:
resp = adap.get('/projects/%s' % project_id)
except kse.EndpointNotFound:
LOG.error(
- "Keystone identity service version 3.0 was not found. This might "
- "be because your endpoint points to the v2.0 versioned endpoint "
- "which is not supported. Please fix this.")
- raise failure
+ "Keystone identity service version 3.0 was not found. This "
+ "might be caused by Nova misconfiguration or Keystone "
+ "problems.")
+ msg = _("Nova was unable to find Keystone service endpoint.")
+ # TODO(astupnik). It may be reasonable to switch to HTTP 503
+ # (HTTP Service Unavailable) instead of HTTP Bad Request here.
+        # If the proper Keystone service is inaccessible, then technically
+ # this is a server side error and not an error in Nova.
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException:
# something is wrong, like there isn't a keystone v3 endpoint,
# or nova isn't configured for the interface to talk to it;
@@ -57,7 +60,8 @@ def verify_project_id(context, project_id):
return True
elif resp.status_code == 404:
# we got access, and we know this project is not there
- raise failure
+ msg = _("Project ID %s is not a valid project.") % project_id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
elif resp.status_code == 403:
# we don't have enough permission to verify this, so default
# to "it's ok".
diff --git a/nova/api/openstack/wsgi_app.py b/nova/api/openstack/wsgi_app.py
index d60069ce84..6a2b72a611 100644
--- a/nova/api/openstack/wsgi_app.py
+++ b/nova/api/openstack/wsgi_app.py
@@ -42,8 +42,11 @@ def _get_config_files(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
+ files = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
+ if files == ['']:
+ files = CONFIG_FILES
return [os.path.join(dirname, config_file)
- for config_file in CONFIG_FILES]
+ for config_file in files]
def _setup_service(host, name):
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index 08b8ebb310..45ae678ab4 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -122,6 +122,10 @@ def format_dict(dct, dict_property="Property", dict_value='Value',
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
+    # Starting in PrettyTable 3.4.0 we also need to set the header
+    # alignment, as align now only applies to the data.
+ if hasattr(pt, 'header_align'):
+ pt.header_align = 'l'
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index c06fefdd3c..6b2023c19f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -2547,6 +2547,8 @@ class API:
instance=instance)
with nova_context.target_cell(context, cell) as cctxt:
self._local_delete(cctxt, instance, bdms, delete_type, cb)
+ self._record_action_start(context, instance,
+ instance_actions.DELETE)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
@@ -3795,7 +3797,8 @@ class API:
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
request_spec=request_spec,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=None)
def _check_volume_status(self, context, bdms):
"""Check whether the status of the volume is "in-use".
@@ -4697,6 +4700,7 @@ class API:
allow_bfv_rescue=False):
"""Rescue the given instance."""
+ image_meta = None
if rescue_image_ref:
try:
image_meta = image_meta_obj.ImageMeta.from_image_ref(
@@ -4717,6 +4721,8 @@ class API:
"image properties set")
raise exception.UnsupportedRescueImage(
image=rescue_image_ref)
+ else:
+ image_meta = instance.image_meta
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4725,6 +4731,9 @@ class API:
volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
+ allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \
+ 'hw_rescue_device' in image_meta.properties
+
if volume_backed and allow_bfv_rescue:
cn = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
@@ -5609,7 +5618,7 @@ class API:
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR], task_state=None)
def evacuate(self, context, instance, host, on_shared_storage,
- admin_password=None, force=None):
+ admin_password=None, force=None, target_state=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
@@ -5620,6 +5629,7 @@ class API:
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
:param force: Force the evacuation to the specific host target
+ :param target_state: Set a target state for the evacuated instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
@@ -5683,7 +5693,7 @@ class API:
on_shared_storage=on_shared_storage,
host=host,
request_spec=request_spec,
- )
+ target_state=target_state)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 79e8f2f012..490b418081 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -124,7 +124,13 @@ class Claim(NopClaim):
pci_requests = self._pci_requests
if pci_requests.requests:
stats = self.tracker.pci_tracker.stats
- if not stats.support_requests(pci_requests.requests):
+ if not stats.support_requests(
+ pci_requests.requests,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+ # carry its own placement provider mapping information
+ provider_mapping=None,
+ ):
return _('Claim pci failed')
def _test_numa_topology(self, compute_node, limit):
@@ -139,12 +145,17 @@ class Claim(NopClaim):
if pci_requests.requests:
pci_stats = self.tracker.pci_tracker.stats
- instance_topology = (
- hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limit,
- pci_requests=pci_requests.requests,
- pci_stats=pci_stats))
+ instance_topology = hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limit,
+ pci_requests=pci_requests.requests,
+ pci_stats=pci_stats,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+ # carry its own placement provider mapping information
+ provider_mapping=None,
+ )
if requested_topology and not instance_topology:
if pci_requests.requests:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 8de5422c6d..5ea71827fc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -84,6 +84,7 @@ from nova.objects import external_event as external_event_obj
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_req_module
from nova.pci import whitelist
from nova import safe_utils
@@ -96,6 +97,7 @@ from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
+import nova.virt.node
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
@@ -616,7 +618,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='6.1')
+ target = messaging.Target(version='6.2')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1470,31 +1472,111 @@ class ComputeManager(manager.Manager):
:return: a dict of ComputeNode objects keyed by the UUID of the given
node.
"""
- nodes_by_uuid = {}
try:
- node_names = self.driver.get_available_nodes()
+ node_ids = self.driver.get_nodenames_by_uuid()
except exception.VirtDriverNotReady:
LOG.warning(
"Virt driver is not ready. If this is the first time this "
- "service is starting on this host, then you can ignore this "
- "warning.")
+ "service is starting on this host, then you can ignore "
+ "this warning.")
return {}
- for node_name in node_names:
- try:
- node = objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, node_name)
- nodes_by_uuid[node.uuid] = node
- except exception.ComputeHostNotFound:
- LOG.warning(
- "Compute node %s not found in the database. If this is "
- "the first time this service is starting on this host, "
- "then you can ignore this warning.", node_name)
- return nodes_by_uuid
+ nodes = objects.ComputeNodeList.get_all_by_uuids(context,
+ list(node_ids.keys()))
+ if not nodes:
+ # NOTE(danms): This should only happen if the compute_id is
+ # pre-provisioned on a host that has never started.
+ LOG.warning('Compute nodes %s for host %s were not found in the '
+ 'database. If this is the first time this service is '
+ 'starting on this host, then you can ignore this '
+ 'warning.',
+ list(node_ids.keys()), self.host)
+ return {}
+
+ for node in nodes:
+ if node.hypervisor_hostname != node_ids.get(node.uuid):
+ raise exception.InvalidConfiguration(
+ ('My compute node %s has hypervisor_hostname %s '
+ 'but virt driver reports it should be %s. Possible '
+ 'rename detected, refusing to start!') % (
+ node.uuid, node.hypervisor_hostname,
+ node_ids.get(node.uuid)))
+
+ return {n.uuid: n for n in nodes}
+
+ def _ensure_existing_node_identity(self, service_ref):
+ """If we are upgrading from an older service version, we need
+ to write our node identity uuid (if not already done) based on
+ nodes assigned to us in the database.
+ """
+ if 'ironic' in CONF.compute_driver.lower():
+ # We do not persist a single local node identity for
+ # ironic
+ return
+
+ if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
+ # Already new enough, nothing to do here, but make sure that we
+ # have a UUID file already, as this is not our first time starting.
+ if nova.virt.node.read_local_node_uuid() is None:
+ raise exception.InvalidConfiguration(
+ ('No local node identity found, but this is not our '
+ 'first startup on this host. Refusing to start after '
+ 'potentially having lost that state!'))
+ return
+
+ if nova.virt.node.read_local_node_uuid():
+ # We already have a local node identity, no migration needed
+ return
- def init_host(self):
+ context = nova.context.get_admin_context()
+ db_nodes = objects.ComputeNodeList.get_all_by_host(context, self.host)
+ if not db_nodes:
+ # This means we have no nodes in the database (that we
+            # know of) and thus have no existing UUID to record. That is
+            # unexpected when upgrading, so refuse to start.
+ raise exception.InvalidConfiguration(
+ ('Upgrading from service version %i but found no '
+ 'nodes in the database for host %s to persist '
+ 'locally; Possible rename detected, '
+ 'refusing to start!') % (
+ service_ref.version, self.host))
+
+ if len(db_nodes) > 1:
+ # If this happens we can't do the right thing, so raise an
+ # exception to abort host startup
+        LOG.warning('Multiple nodes found in the database for host %s; '
+                    'unable to persist local node identity automatically',
+                    self.host)
+ raise exception.InvalidConfiguration(
+ 'Multiple nodes found in database, manual node uuid '
+ 'configuration required')
+
+ nova.virt.node.write_local_node_uuid(db_nodes[0].uuid)
+
+ def _check_for_host_rename(self, nodes_by_uuid):
+ if 'ironic' in CONF.compute_driver.lower():
+ # Ironic (currently) rebalances nodes at various times, and as
+ # such, nodes being discovered as assigned to this host with a
+ # different hostname is not surprising. Skip this check for
+ # ironic.
+ return
+ for node in nodes_by_uuid.values():
+ if node.host != self.host:
+ raise exception.InvalidConfiguration(
+ 'My node %s has host %r but my host is %r; '
+ 'Possible rename detected, refusing to start!' % (
+ node.uuid, node.host, self.host))
+ LOG.debug('Verified node %s matches my host %s',
+ node.uuid, self.host)
+
+ def init_host(self, service_ref):
"""Initialization for a standalone compute service."""
+ if service_ref:
+ # If we are an existing service, check to see if we need
+ # to record a locally-persistent node identity because
+ # we have upgraded from a previous version.
+ self._ensure_existing_node_identity(service_ref)
+
if CONF.pci.device_spec:
# Simply loading the PCI passthrough spec will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
@@ -1524,7 +1606,18 @@ class ComputeManager(manager.Manager):
raise exception.InvalidConfiguration(msg)
self.driver.init_host(host=self.host)
+
+ # NOTE(gibi): At this point the compute_nodes of the resource tracker
+ # has not been populated yet so we cannot rely on the resource tracker
+ # here.
context = nova.context.get_admin_context()
+ nodes_by_uuid = self._get_nodes(context)
+
+ # NOTE(danms): Check for a possible host rename and abort
+ # startup before we start mucking with instances we think are
+ # ours.
+ self._check_for_host_rename(nodes_by_uuid)
+
instances = objects.InstanceList.get_by_host(
context, self.host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
@@ -1534,17 +1627,12 @@ class ComputeManager(manager.Manager):
self._validate_pinning_configuration(instances)
self._validate_vtpm_configuration(instances)
- # NOTE(gibi): At this point the compute_nodes of the resource tracker
- # has not been populated yet so we cannot rely on the resource tracker
- # here.
# NOTE(gibi): If ironic and vcenter virt driver slow start time
# becomes problematic here then we should consider adding a config
# option or a driver flag to tell us if we should thread
# _destroy_evacuated_instances and
# _error_out_instances_whose_build_was_interrupted out in the
# background on startup
- nodes_by_uuid = self._get_nodes(context)
-
try:
# checking that instance was not already evacuated to other host
evacuated_instances = self._destroy_evacuated_instances(
@@ -2471,10 +2559,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils\
- .update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -2736,7 +2826,8 @@ class ComputeManager(manager.Manager):
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
- exception.UnexpectedDeletingTaskStateError):
+ exception.UnexpectedDeletingTaskStateError,
+ exception.ComputeResourcesUnavailable):
with excutils.save_and_reraise_exception():
self._build_resources_cleanup(instance, network_info)
except (exception.UnexpectedTaskStateError,
@@ -3621,7 +3712,7 @@ class ComputeManager(manager.Manager):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -3656,6 +3747,7 @@ class ComputeManager(manager.Manager):
:param reimage_boot_volume: Boolean to specify whether the user has
explicitly requested to rebuild a boot
volume
+ :param target_state: Set a target state for the evacuated instance.
"""
# recreate=True means the instance is being evacuated from a failed
@@ -3720,7 +3812,8 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass, orig_sys_metadata,
bdms, evacuate, on_shared_storage, preserve_ephemeral,
migration, request_spec, allocs, rebuild_claim,
- scheduled_node, limits, accel_uuids, reimage_boot_volume)
+ scheduled_node, limits, accel_uuids, reimage_boot_volume,
+ target_state)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
@@ -3780,7 +3873,7 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, rebuild_claim, scheduled_node, limits, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
"""Helper to avoid deep nesting in the top-level method."""
provider_mapping = None
@@ -3788,10 +3881,12 @@ class ComputeManager(manager.Manager):
provider_mapping = self._get_request_group_mapping(request_spec)
if provider_mapping:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
claim_context = rebuild_claim(
context, instance, scheduled_node, allocations,
@@ -3802,7 +3897,8 @@ class ComputeManager(manager.Manager):
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations,
- provider_mapping, accel_uuids, reimage_boot_volume)
+ provider_mapping, accel_uuids, reimage_boot_volume,
+ target_state)
@staticmethod
def _get_image_name(image_meta):
@@ -3816,10 +3912,18 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, request_group_resource_providers_mapping,
- accel_uuids, reimage_boot_volume):
+ accel_uuids, reimage_boot_volume, target_state):
orig_vm_state = instance.vm_state
if evacuate:
+ if target_state and orig_vm_state != vm_states.ERROR:
+                # This ensures that the instance will have the desired state
+                # at the destination.
+ if target_state not in vm_states.ALLOW_TARGET_STATES:
+ raise exception.InstanceEvacuateNotSupportedTargetState(
+ target_state=target_state)
+ orig_vm_state = target_state
+
if request_spec:
# NOTE(gibi): Do a late check of server group policy as
# parallel scheduling could violate such policy. This will
@@ -5414,10 +5518,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -5520,7 +5626,7 @@ class ComputeManager(manager.Manager):
clean_shutdown)
except exception.BuildAbortException:
# NOTE(gibi): We failed
- # update_pci_request_spec_with_allocated_interface_name so
+ # update_pci_request_with_placement_allocations so
# there is no reason to re-schedule. Just revert the allocation
# and fail the migration.
with excutils.save_and_reraise_exception():
@@ -5651,7 +5757,7 @@ class ComputeManager(manager.Manager):
'host (%s).', self.host, instance=instance)
self._send_prep_resize_notifications(
ctxt, instance, fields.NotificationPhase.START, flavor)
- # TODO(mriedem): update_pci_request_spec_with_allocated_interface_name
+ # TODO(mriedem): update_pci_request_with_placement_allocations
# should be called here if the request spec has request group mappings,
# e.g. for things like QoS ports with resource requests. Do it outside
# the try/except so if it raises BuildAbortException we do not attempt
@@ -6901,12 +7007,12 @@ class ComputeManager(manager.Manager):
try:
if provider_mappings:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, instance.pci_requests.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mappings,
+ )
accel_info = []
if accel_uuids:
@@ -7986,12 +8092,12 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid) from e
try:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, pci_reqs.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ pci_reqs.requests,
+ provider_mappings,
+ )
except (
exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
@@ -8810,7 +8916,8 @@ class ComputeManager(manager.Manager):
# in order to be able to track and abort it in the future.
self._waiting_live_migrations[instance.uuid] = (None, None)
try:
- future = self._live_migration_executor.submit(
+ future = nova.utils.pass_context(
+ self._live_migration_executor.submit,
self._do_live_migration, context, dest, instance,
block_migration, migration, migrate_data)
self._waiting_live_migrations[instance.uuid] = (migration, future)
@@ -10094,7 +10201,9 @@ class ComputeManager(manager.Manager):
else:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
- self._sync_power_pool.spawn_n(_sync, db_instance)
+ nova.utils.pass_context(self._sync_power_pool.spawn_n,
+ _sync,
+ db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
@@ -10377,6 +10486,14 @@ class ComputeManager(manager.Manager):
LOG.exception(
"Error updating PCI resources for node %(node)s.",
{'node': nodename})
+ except exception.InvalidConfiguration as e:
+ if startup:
+ # If this happens during startup, we need to let it raise to
+ # abort our service startup.
+ raise
+ else:
+ LOG.error("Error updating resources for node %s: %s",
+ nodename, e)
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
@@ -11293,7 +11410,7 @@ class _ComputeV5Proxy(object):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
- accel_uuids, False)
+ accel_uuids, False, None)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
diff --git a/nova/compute/pci_placement_translator.py b/nova/compute/pci_placement_translator.py
index d6d7fdd6f1..016efd9122 100644
--- a/nova/compute/pci_placement_translator.py
+++ b/nova/compute/pci_placement_translator.py
@@ -65,20 +65,50 @@ def _normalize_traits(traits: ty.List[str]) -> ty.List[str]:
return list(standard_traits) + custom_traits
-def _get_traits_for_dev(
- dev_spec_tags: ty.Dict[str, str],
-) -> ty.Set[str]:
+def get_traits(traits_str: str) -> ty.Set[str]:
+ """Return a normalized set of placement standard and custom traits from
+ a string of comma separated trait names.
+ """
# traits is a comma separated list of placement trait names
- traits_str = dev_spec_tags.get("traits")
if not traits_str:
- return {os_traits.COMPUTE_MANAGED_PCI_DEVICE}
+ return set()
+ return set(_normalize_traits(traits_str.split(',')))
- traits = traits_str.split(',')
- return set(_normalize_traits(traits)) | {
+
+def _get_traits_for_dev(
+ dev_spec_tags: ty.Dict[str, str],
+) -> ty.Set[str]:
+ return get_traits(dev_spec_tags.get("traits", "")) | {
os_traits.COMPUTE_MANAGED_PCI_DEVICE
}
+def _normalize_resource_class(rc: str) -> str:
+ rc = rc.upper()
+ if (
+ rc not in os_resource_classes.STANDARDS and
+ not os_resource_classes.is_custom(rc)
+ ):
+ rc = os_resource_classes.normalize_name(rc)
+ # mypy: normalize_name will return non None for non None input
+ assert rc
+
+ return rc
+
+
+def get_resource_class(
+ requested_name: ty.Optional[str], vendor_id: str, product_id: str
+) -> str:
+ """Return the normalized resource class name based on what is requested
+ or if nothing is requested then generated from the vendor_id and product_id
+ """
+ if requested_name:
+ rc = _normalize_resource_class(requested_name)
+ else:
+ rc = f"CUSTOM_PCI_{vendor_id}_{product_id}".upper()
+ return rc
+
+
def _get_rc_for_dev(
dev: pci_device.PciDevice,
dev_spec_tags: ty.Dict[str, str],
@@ -91,23 +121,8 @@ def _get_rc_for_dev(
The user specified resource class is normalized if it is not already an
acceptable standard or custom resource class.
"""
- # Either use the resource class from the config or the vendor_id and
- # product_id of the device to generate the RC
rc = dev_spec_tags.get("resource_class")
- if rc:
- rc = rc.upper()
- if (
- rc not in os_resource_classes.STANDARDS and
- not os_resource_classes.is_custom(rc)
- ):
- rc = os_resource_classes.normalize_name(rc)
- # mypy: normalize_name will return non None for non None input
- assert rc
-
- else:
- rc = f"CUSTOM_PCI_{dev.vendor_id}_{dev.product_id}".upper()
-
- return rc
+ return get_resource_class(rc, dev.vendor_id, dev.product_id)
class PciResourceProvider:
@@ -246,6 +261,12 @@ class PciResourceProvider:
)
provider_tree.update_traits(self.name, self.traits)
+ # Here we are sure the RP exists in the provider_tree. So, we can
+ # record the RP UUID in each PciDevice this RP represents
+ rp_uuid = provider_tree.data(self.name).uuid
+ for dev in self.devs:
+ dev.extra_info['rp_uuid'] = rp_uuid
+
def update_allocations(
self,
allocations: dict,
@@ -583,12 +604,17 @@ def update_provider_tree_for_pci(
pv.update_provider_tree(provider_tree)
old_alloc = copy.deepcopy(allocations)
+ # update_provider_tree correlated the PciDevice objects with RPs in
+ # placement and recorded the RP UUID in the PciDevice object. We need to
+ # trigger an update on the device pools in the tracker to get the device
+ # RP UUID mapped to the device pools
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices()
updated = pv.update_allocations(allocations, provider_tree)
if updated:
LOG.debug(
"Placement PCI view needs allocation healing. This should only "
- "happen if [scheduler]pci_in_placement is still disabled. "
+ "happen if [filter_scheduler]pci_in_placement is still disabled. "
"Original allocations: %s New allocations: %s",
old_alloc,
allocations,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index ffbc7ed03f..3f911f3708 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -49,6 +49,7 @@ from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
+from nova.virt import node
CONF = nova.conf.CONF
@@ -619,18 +620,11 @@ class ResourceTracker(object):
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
- # Remove usage for an instance that is tracked in migrations, such as
- # on the dest node during revert resize.
- if instance['uuid'] in self.tracked_migrations:
- migration = self.tracked_migrations.pop(instance['uuid'])
+ if instance["uuid"] in self.tracked_migrations:
if not flavor:
- flavor = self._get_flavor(instance, prefix, migration)
- # Remove usage for an instance that is not tracked in migrations (such
- # as on the source node after a migration).
- # NOTE(lbeliveau): On resize on the same node, the instance is
- # included in both tracked_migrations and tracked_instances.
- elif instance['uuid'] in self.tracked_instances:
- self.tracked_instances.remove(instance['uuid'])
+ flavor = self._get_flavor(
+ instance, prefix, self.tracked_migrations[instance["uuid"]]
+ )
if flavor is not None:
numa_topology = self._get_migration_context_resource(
@@ -646,6 +640,15 @@ class ResourceTracker(object):
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
+ # Remove usage for an instance that is tracked in migrations, such as
+ # on the dest node during revert resize.
+ self.tracked_migrations.pop(instance['uuid'], None)
+ # Remove usage for an instance that is not tracked in migrations (such
+ # as on the source node after a migration).
+ # NOTE(lbeliveau): On resize on the same node, the instance is
+ # included in both tracked_migrations and tracked_instances.
+ self.tracked_instances.discard(instance['uuid'])
+
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
@@ -666,50 +669,6 @@ class ResourceTracker(object):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
- def _check_for_nodes_rebalance(self, context, resources, nodename):
- """Check if nodes rebalance has happened.
-
- The ironic driver maintains a hash ring mapping bare metal nodes
- to compute nodes. If a compute dies, the hash ring is rebuilt, and
- some of its bare metal nodes (more precisely, those not in ACTIVE
- state) are assigned to other computes.
-
- This method checks for this condition and adjusts the database
- accordingly.
-
- :param context: security context
- :param resources: initial values
- :param nodename: node name
- :returns: True if a suitable compute node record was found, else False
- """
- if not self.driver.rebalances_nodes:
- return False
-
- # Its possible ironic just did a node re-balance, so let's
- # check if there is a compute node that already has the correct
- # hypervisor_hostname. We can re-use that rather than create a
- # new one and have to move existing placement allocations
- cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
- context, nodename)
-
- if len(cn_candidates) == 1:
- cn = cn_candidates[0]
- LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
- {"name": nodename, "old": cn.host, "new": self.host})
- cn.host = self.host
- self.compute_nodes[nodename] = cn
- self._copy_resources(cn, resources)
- self._setup_pci_tracker(context, cn, resources)
- self._update(context, cn)
- return True
- elif len(cn_candidates) > 1:
- LOG.error(
- "Found more than one ComputeNode for nodename %s. "
- "Please clean up the orphaned ComputeNode records in your DB.",
- nodename)
-
- return False
-
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
@@ -727,6 +686,7 @@ class ResourceTracker(object):
False otherwise
"""
nodename = resources['hypervisor_hostname']
+ node_uuid = resources['uuid']
# if there is already a compute node just use resources
# to initialize
@@ -738,23 +698,43 @@ class ResourceTracker(object):
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
- cn = self._get_compute_node(context, nodename)
+
+        # We use read_deleted='yes' so that we will find and recover a
+        # deleted node object, if necessary.
+ with utils.temporary_mutation(context, read_deleted='yes'):
+ cn = self._get_compute_node(context, node_uuid)
+ if cn and cn.deleted:
+ # Undelete and save this right now so that everything below
+ # can continue without read_deleted=yes
+ LOG.info('Undeleting compute node %s', cn.uuid)
+ cn.deleted = False
+ cn.deleted_at = None
+ cn.save()
if cn:
+ if cn.host != self.host:
+ LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
+ {"name": nodename, "old": cn.host, "new": self.host})
+ cn.host = self.host
+ self._update(context, cn)
+
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
- if self._check_for_nodes_rebalance(context, resources, nodename):
- return False
-
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
- cn.create()
+ try:
+ cn.create()
+ except exception.DuplicateRecord:
+ raise exception.InvalidConfiguration(
+ 'Duplicate compute node record found for host %s node %s' % (
+ cn.host, cn.hypervisor_hostname))
+
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
@@ -887,6 +867,14 @@ class ResourceTracker(object):
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
+ if 'uuid' not in resources:
+ # NOTE(danms): Any driver that does not provide a uuid per
+ # node gets the locally-persistent compute_id. Only ironic
+ # should be setting the per-node uuid (and returning
+ # multiple nodes in general). If this is the first time we
+ # are creating a compute node on this host, we will
+ # generate and persist this uuid for the future.
+ resources['uuid'] = node.get_local_node_uuid()
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
@@ -991,8 +979,6 @@ class ResourceTracker(object):
# notified when instances are deleted, we need remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations)
- dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
- cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
@@ -1014,14 +1000,13 @@ class ResourceTracker(object):
if startup:
self._check_resources(context)
- def _get_compute_node(self, context, nodename):
+ def _get_compute_node(self, context, node_uuid):
"""Returns compute node for the host and nodename."""
try:
- return objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, nodename)
+ return objects.ComputeNode.get_by_uuid(context, node_uuid)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
- {'host': self.host, 'node': nodename})
+ {'host': self.host, 'node': node_uuid})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
@@ -1314,13 +1299,23 @@ class ResourceTracker(object):
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
+
+ self._update_to_placement(context, compute_node, startup)
+
+ if self.pci_tracker:
+ # sync PCI device pool state stored in the compute node with
+ # the actual state from the PCI tracker as we commit changes in
+ # the DB and in the PCI tracker below
+ dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
+ compute_node.pci_device_pools = dev_pools_obj
+
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB. Note that
- # _update_to_placement below does not supersede the need to do this
+ # _update_to_placement above does not supersede the need to do this
# because there are stats-related fields in the ComputeNode object
# which could have changed and still need to be reported to the
# scheduler filters/weighers (which could be out of tree as well).
@@ -1333,8 +1328,6 @@ class ResourceTracker(object):
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
- self._update_to_placement(context, compute_node, startup)
-
if self.pci_tracker:
self.pci_tracker.save(context)
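# Illustrative sketch (not part of the change above) of the "locally
# persistent compute_id" idea behind node.get_local_node_uuid(): generate a
# node UUID once and reuse it on every restart. The file name and state path
# below are hypothetical.
import os
import uuid


def get_or_create_local_node_uuid(state_path='/var/lib/nova'):
    identity_file = os.path.join(state_path, 'compute_id')
    if os.path.exists(identity_file):
        with open(identity_file) as f:
            return f.read().strip()
    node_uuid = str(uuid.uuid4())
    with open(identity_file, 'w') as f:
        f.write(node_uuid)
    return node_uuid
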
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 3ac4c6dcfa..efc06300db 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -403,6 +403,7 @@ class ComputeAPI(object):
* ... - Rename the instance_type argument of resize_instance() to
flavor
* 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
+ * 6.2 - Add target_state parameter to rebuild_instance()
'''
VERSION_ALIASES = {
@@ -424,6 +425,7 @@ class ComputeAPI(object):
'xena': '6.0',
'yoga': '6.0',
'zed': '6.1',
+ 'antelope': '6.2',
}
@property
@@ -1083,7 +1085,7 @@ class ComputeAPI(object):
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate, on_shared_storage, host, node,
preserve_ephemeral, migration, limits, request_spec, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
@@ -1096,12 +1098,20 @@ class ComputeAPI(object):
'limits': limits,
'request_spec': request_spec,
'accel_uuids': accel_uuids,
- 'reimage_boot_volume': reimage_boot_volume
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
-
- version = '6.1'
+ version = '6.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
+ if msg_args['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance",
+ required="6.2")
+ else:
+ del msg_args['target_state']
+ version = '6.1'
+ if not client.can_send_version(version):
if msg_args['reimage_boot_volume']:
raise exception.NovaException(
'Compute RPC version does not support '
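# Illustrative sketch of the version-capping pattern used above: newly added
# arguments are either rejected or dropped when the peer only speaks an older
# RPC version. The client object and exception type here are stand-ins.
def cap_rebuild_args(client, msg_args):
    version = '6.2'
    if not client.can_send_version(version):
        if msg_args['target_state']:
            raise RuntimeError('rebuild_instance with target_state needs '
                               'compute RPC >= 6.2')
        del msg_args['target_state']
        version = '6.1'
    if not client.can_send_version(version):
        if msg_args['reimage_boot_volume']:
            raise RuntimeError('rebuild_instance with reimage_boot_volume '
                               'needs compute RPC >= 6.1')
        del msg_args['reimage_boot_volume']
        version = '6.0'
    return version, msg_args
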
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 036e69b7ce..30efc24fc7 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -1491,7 +1491,7 @@ def notify_about_instance_delete(notifier, context, instance,
phase=fields.NotificationPhase.END)
-def update_pci_request_spec_with_allocated_interface_name(
+def update_pci_request_with_placement_allocations(
context, report_client, pci_requests, provider_mapping):
"""Update the instance's PCI request based on the request group -
resource provider mapping and the device RP name from placement.
@@ -1512,12 +1512,33 @@ def update_pci_request_spec_with_allocated_interface_name(
if not pci_requests:
return
- def needs_update(pci_request, mapping):
+ def needs_update_due_to_qos(pci_request, mapping):
return (pci_request.requester_id and
pci_request.requester_id in mapping)
+ def get_group_mapping_for_flavor_based_pci_request(pci_request, mapping):
+ # NOTE(gibi): for flavor based PCI requests nova generates RequestGroup
+ # suffixes from InstancePCIRequests in the form of
+ # {request_id}-{count_index}
+        # NOTE(gibi): a suffixed request group is always fulfilled from a
+        # single RP
+ return {
+ group_id: rp_uuids[0]
+ for group_id, rp_uuids in mapping.items()
+ if group_id.startswith(pci_request.request_id)
+ }
+
for pci_request in pci_requests:
- if needs_update(pci_request, provider_mapping):
+ mapping = get_group_mapping_for_flavor_based_pci_request(
+ pci_request, provider_mapping)
+
+ if mapping:
+ for spec in pci_request.spec:
+ # FIXME(gibi): this is baaad but spec is a dict of strings so
+ # we need to serialize
+ spec['rp_uuids'] = ','.join(mapping.values())
+
+ elif needs_update_due_to_qos(pci_request, provider_mapping):
provider_uuids = provider_mapping[pci_request.requester_id]
if len(provider_uuids) != 1:
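# Illustrative sketch of the suffix-based lookup above: request groups
# generated from a flavor based InstancePCIRequest are keyed as
# "{request_id}-{count_index}" and each is fulfilled by exactly one RP.
# The request id and RP UUIDs below are made up.
provider_mapping = {
    'a3f1-0': ['rp-uuid-1'],
    'a3f1-1': ['rp-uuid-2'],
    'port-group': ['rp-uuid-3'],   # unrelated (e.g. QoS port) group
}
request_id = 'a3f1'
mapping = {
    group_id: rp_uuids[0]
    for group_id, rp_uuids in provider_mapping.items()
    if group_id.startswith(request_id)
}
# mapping == {'a3f1-0': 'rp-uuid-1', 'a3f1-1': 'rp-uuid-2'} and the values
# are serialized into the request spec as 'rp-uuid-1,rp-uuid-2'.
serialized = ','.join(mapping.values())
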
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 633894c1ea..1a916ea59a 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -76,3 +76,6 @@ ALLOW_TRIGGER_CRASH_DUMP = [ACTIVE, PAUSED, RESCUED, RESIZED, ERROR]
# states we allow resources to be freed in
ALLOW_RESOURCE_REMOVAL = [DELETED, SHELVED_OFFLOADED]
+
+# states we allow as the target state for evacuate
+ALLOW_TARGET_STATES = [STOPPED]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 778fdd6c73..843c8ce3a3 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -144,7 +144,8 @@ class ComputeTaskAPI(object):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None,
- request_spec=None, reimage_boot_volume=False):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
@@ -158,7 +159,8 @@ class ComputeTaskAPI(object):
preserve_ephemeral=preserve_ephemeral,
host=host,
request_spec=request_spec,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def cache_images(self, context, aggregate, image_ids):
"""Request images be pre-cached on hosts within an aggregate.
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9e822db081..4b34b8339c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -235,7 +235,7 @@ class ComputeTaskManager:
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.24')
+ target = messaging.Target(namespace='compute_task', version='1.25')
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -1037,6 +1037,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+            # NOTE(gibi): as PCI devices are tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
@@ -1146,7 +1152,8 @@ class ComputeTaskManager:
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
- request_spec=None, reimage_boot_volume=False):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
@@ -1242,6 +1249,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+            # NOTE(gibi): as PCI devices are tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
try:
# if this is a rebuild of instance on the same host with
# new image.
@@ -1344,7 +1357,8 @@ class ComputeTaskManager:
limits=limits,
request_spec=request_spec,
accel_uuids=accel_uuids,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
@@ -2082,8 +2096,8 @@ class ComputeTaskManager:
skipped_host(target_ctxt, host, image_ids)
continue
- fetch_pool.spawn_n(wrap_cache_images, target_ctxt, host,
- image_ids)
+ utils.pass_context(fetch_pool.spawn_n, wrap_cache_images,
+ target_ctxt, host, image_ids)
# Wait until all those things finish
fetch_pool.waitall()
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index ffaecd2c95..a5f0cf0094 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -287,6 +287,7 @@ class ComputeTaskAPI(object):
1.22 - Added confirm_snapshot_based_resize()
1.23 - Added revert_snapshot_based_resize()
1.24 - Add reimage_boot_volume parameter to rebuild_instance()
+ 1.25 - Add target_state parameter to rebuild_instance()
"""
def __init__(self):
@@ -428,8 +429,8 @@ class ComputeTaskAPI(object):
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, request_spec=None,
- reimage_boot_volume=False):
- version = '1.24'
+ reimage_boot_volume=False, target_state=None):
+ version = '1.25'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
@@ -442,9 +443,17 @@ class ComputeTaskAPI(object):
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
- 'reimage_boot_volume': reimage_boot_volume
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
if not self.client.can_send_version(version):
+ if kw['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance", required="1.25")
+ else:
+ del kw['target_state']
+ version = '1.24'
+ if not self.client.can_send_version(version):
if kw['reimage_boot_volume']:
raise exception.NovaException(
'Conductor RPC version does not support '
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index f8819b0dc8..cca97c53f7 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -542,7 +542,7 @@ class LiveMigrationTask(base.TaskBase):
# will be persisted when post_live_migration_at_destination
# runs.
compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
+ update_pci_request_with_placement_allocations(
self.context, self.report_client,
self.instance.pci_requests.requests, provider_mapping)
try:
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 8838d0240a..754f9e5ba7 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -258,6 +258,11 @@ class MigrationTask(base.TaskBase):
# resource requests in a single list and add them to the RequestSpec.
self.request_spec.requested_resources = port_res_req
self.request_spec.request_level_params = req_lvl_params
+        # NOTE(gibi): as PCI devices are tracked in placement we need to
+ # generate request groups from InstancePCIRequests. This will append
+ # new RequestGroup objects to the request_spec.requested_resources list
+ # if needed
+ self.request_spec.generate_request_groups_from_pci_requests()
self._set_requested_destination_cell(legacy_props)
diff --git a/nova/conf/api.py b/nova/conf/api.py
index 5c8a367e8e..58cbc4931e 100644
--- a/nova/conf/api.py
+++ b/nova/conf/api.py
@@ -225,8 +225,11 @@ service.
help="""
Domain name used to configure FQDN for instances.
-Configure a fully-qualified domain name for instance hostnames. If unset, only
-the hostname without a domain will be configured.
+Configure a fully-qualified domain name for instance hostnames. The value is
+suffixed to the instance hostname from the database to construct the hostname
+that appears in the metadata API. To disable this behavior (for example in
+order to correctly support microversion 2.94's FQDN hostnames), set this to the
+empty string.
Possible values:
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 224c802935..de2743d850 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -1016,6 +1016,15 @@ Related options:
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
+ cfg.ListOpt('vmdk_allowed_types',
+ default=['streamOptimized', 'monolithicSparse'],
+ help="""
+A list of strings describing allowed VMDK "create-type" subformats.
+It is recommended to only include
+single-file-with-sparse-header variants to avoid potential host file
+exposure due to processing named extents. If this list is empty, then no
+form of VMDK image will be allowed.
+"""),
cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
default=False,
help="""
diff --git a/nova/conf/ironic.py b/nova/conf/ironic.py
index dc5d2412c4..2734f2b78a 100644
--- a/nova/conf/ironic.py
+++ b/nova/conf/ironic.py
@@ -27,6 +27,7 @@ ironic_group = cfg.OptGroup(
help="""
Configuration options for Ironic driver (Bare Metal).
If using the Ironic driver following options must be set:
+
* auth_type
* auth_url
* project_name
diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 4ea37b8fe9..204fe5c4b8 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -987,6 +987,7 @@ slowly to be useful. Actual errors will be reported by Glance and noticed
according to the poll interval.
Related options:
+
* images_type - must be set to ``rbd``
* images_rbd_glance_store_name - must be set to a store name
* images_rbd_glance_copy_poll_interval - controls the failure time-to-notice
@@ -1477,6 +1478,23 @@ Related options:
"""),
]
+libvirt_cpu_mgmt_opts = [
+ cfg.BoolOpt('cpu_power_management',
+ default=False,
+               help='Use libvirt to manage CPU core performance.'),
+ cfg.StrOpt('cpu_power_management_strategy',
+ choices=['cpu_state', 'governor'],
+ default='cpu_state',
+ help='Tuning strategy to reduce CPU power consumption when '
+ 'unused'),
+ cfg.StrOpt('cpu_power_governor_low',
+ default='powersave',
+ help='Governor to use in order '
+ 'to reduce CPU power consumption'),
+ cfg.StrOpt('cpu_power_governor_high',
+ default='performance',
+ help='Governor to use in order to have best CPU performance'),
+]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
@@ -1498,6 +1516,7 @@ ALL_OPTS = list(itertools.chain(
libvirt_volume_nvmeof_opts,
libvirt_pmem_opts,
libvirt_vtpm_opts,
+ libvirt_cpu_mgmt_opts,
))
diff --git a/nova/conf/mks.py b/nova/conf/mks.py
index 1703f5f240..ec403a1a4f 100644
--- a/nova/conf/mks.py
+++ b/nova/conf/mks.py
@@ -23,7 +23,9 @@ Nova compute node uses WebMKS, a desktop sharing protocol to provide
instance console access to VM's created by VMware hypervisors.
Related options:
+
Following options must be set to provide console access.
+
* mksproxy_base_url
* enabled
""")
diff --git a/nova/conf/pci.py b/nova/conf/pci.py
index 673185391b..533bf52ead 100644
--- a/nova/conf/pci.py
+++ b/nova/conf/pci.py
@@ -67,6 +67,36 @@ Possible Values:
Required NUMA affinity of device. Valid values are: ``legacy``,
``preferred`` and ``required``.
+ ``resource_class``
+ The optional Placement resource class name that is used
+ to track the requested PCI devices in Placement. It can be a standard
+ resource class from the ``os-resource-classes`` lib. Or it can be an
+    arbitrary string. If it is a non-standard resource class then Nova will
+ normalize it to a proper Placement resource class by
+ making it upper case, replacing any consecutive character outside of
+ ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if
+    not yet prefixed. The maximum allowed length is 255 characters including the
+ prefix. If ``resource_class`` is not provided Nova will generate it from
+ ``vendor_id`` and ``product_id`` values of the alias in the form of
+ ``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class`` requested
+ in the alias is matched against the ``resource_class`` defined in the
+    ``[pci]device_spec``. This field can only be used if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
+
+ ``traits``
+ An optional comma separated list of Placement trait names requested to be
+ present on the resource provider that fulfills this alias. Each trait can
+ be a standard trait from ``os-traits`` lib or it can be an arbitrary
+ string. If it is a non-standard trait then Nova will normalize the
+ trait name by making it upper case, replacing any consecutive character
+ outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name
+ with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a
+    trait name is 255 characters including the prefix. Every trait in
+    ``traits`` requested in the alias is ensured to be in the list of traits
+ provided in the ``traits`` field of the ``[pci]device_spec`` when
+    scheduling the request. This field can only be used if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
+
* Supports multiple aliases by repeating the option (not by specifying
a list value)::
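# Illustrative sketch of the normalization rules described in the help text
# above (upper-case the name, collapse runs of characters outside [A-Z0-9_]
# to a single '_', and prefix with CUSTOM_ if needed). The authoritative
# implementation lives in nova/os-resource-classes and may differ in detail.
import re


def normalize_custom_name(name):
    norm = re.sub(r'[^A-Z0-9_]+', '_', name.upper())
    if not norm.startswith('CUSTOM_'):
        norm = 'CUSTOM_' + norm
    return norm


# normalize_custom_name('gpu model-a') == 'CUSTOM_GPU_MODEL_A'
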
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index 03e78fe701..c75bd07c5b 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -745,7 +745,26 @@ Possible values:
Related options:
* ``[filter_scheduler] aggregate_image_properties_isolation_namespace``
-""")]
+"""),
+ cfg.BoolOpt(
+ "pci_in_placement",
+ default=False,
+ help="""
+Enable scheduling and claiming PCI devices in Placement.
+
+This can be enabled after ``[pci]report_in_placement`` is enabled on all
+compute hosts.
+
+When enabled, the scheduler queries Placement about PCI device
+availability to select a destination for a server with a PCI request. The
+scheduler also allocates the selected PCI devices in Placement. Note that this
+logic does not replace the PCIPassthroughFilter but extends it.
+
+Related options:
+
+* ``[pci] report_in_placement``
+* ``[pci] alias``
+* ``[pci] device_spec``
+"""),
+]
metrics_group = cfg.OptGroup(
name="metrics",
diff --git a/nova/conf/spice.py b/nova/conf/spice.py
index 59ed4e80a0..e5854946f1 100644
--- a/nova/conf/spice.py
+++ b/nova/conf/spice.py
@@ -85,6 +85,59 @@ Agent. With the Spice agent installed the following features are enabled:
needing to click inside the console or press keys to release it. The
performance of mouse movement is also improved.
"""),
+ cfg.StrOpt('image_compression',
+ advanced=True,
+ choices=[
+ ('auto_glz', 'enable image compression mode to choose between glz '
+ 'and quic algorithm, based on image properties'),
+ ('auto_lz', 'enable image compression mode to choose between lz '
+ 'and quic algorithm, based on image properties'),
+ ('quic', 'enable image compression based on the SFALIC algorithm'),
+ ('glz', 'enable image compression using lz with history based '
+ 'global dictionary'),
+ ('lz', 'enable image compression with the Lempel-Ziv algorithm'),
+ ('off', 'disable image compression')
+ ],
+ help="""
+Configure the SPICE image compression (lossless).
+"""),
+ cfg.StrOpt('jpeg_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable JPEG image compression automatically'),
+ ('never', 'disable JPEG image compression'),
+ ('always', 'enable JPEG image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossy for slow links).
+"""),
+ cfg.StrOpt('zlib_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable zlib image compression automatically'),
+ ('never', 'disable zlib image compression'),
+ ('always', 'enable zlib image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossless for slow links).
+"""),
+ cfg.BoolOpt('playback_compression',
+ advanced=True,
+ help="""
+Enable the SPICE audio stream compression (using celt).
+"""),
+ cfg.StrOpt('streaming_mode',
+ advanced=True,
+ choices=[
+ ('filter', 'SPICE server adds additional filters to decide if '
+ 'video streaming should be activated'),
+ ('all', 'any fast-refreshing window can be encoded into a video '
+ 'stream'),
+ ('off', 'no video detection and (lossy) compression is performed')
+ ],
+ help="""
+Configure the SPICE video stream detection and (lossy) compression.
+"""),
cfg.URIOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help="""
diff --git a/nova/conf/vmware.py b/nova/conf/vmware.py
index 63a5f04ea4..17a2676b64 100644
--- a/nova/conf/vmware.py
+++ b/nova/conf/vmware.py
@@ -76,7 +76,9 @@ Possible values:
* Any valid URI (The scheme is 'telnet' or 'telnets'.)
Related options:
+
This option is ignored if serial_port_service_uri is not specified.
+
* serial_port_service_uri
"""),
cfg.StrOpt('serial_log_dir',
@@ -112,6 +114,7 @@ If true, the vCenter server certificate is not verified. If false,
then the default CA truststore is used for verification.
Related options:
+
* ca_file: This option is ignored if "ca_file" is set.
"""),
cfg.StrOpt('cluster_name',
@@ -158,7 +161,9 @@ Possible values:
* Any valid port number within 5900 -(5900 + vnc_port_total)
Related options:
+
Below options should be set to enable VNC client.
+
* vnc.enabled = True
* vnc_port_total
"""),
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index 2ec53282cd..943ec74885 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -374,6 +374,28 @@ Related options:
* :oslo.config:option:`DEFAULT.compute_driver` (libvirt)
"""),
+ cfg.IntOpt('qemu_monitor_announce_self_count',
+ default=3,
+ min=1,
+ help="""
+The total number of times to send the announce_self command to the QEMU
+monitor when enable_qemu_monitor_announce_self is enabled.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
+ cfg.IntOpt('qemu_monitor_announce_self_interval',
+ default=1,
+ min=1,
+ help="""
+The number of seconds to wait before re-sending the announce_self
+command to the QEMU monitor.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
cfg.BoolOpt('disable_compute_service_check_for_ffu',
default=False,
help="""
@@ -410,6 +432,13 @@ with the destination host. When using QEMU >= 2.9 and libvirt >=
4.4.0, libvirt will do the correct thing with respect to checking CPU
compatibility on the destination host during live migration.
"""),
+ cfg.BoolOpt('skip_cpu_compare_at_startup',
+ default=False,
+ help="""
+This will skip the CPU comparison call at the startup of the Compute
+service and let libvirt handle it.
+"""),
+
cfg.BoolOpt(
'skip_hypervisor_version_check_on_lm',
default=False,
@@ -417,6 +446,21 @@ compatibility on the destination host during live migration.
When this is enabled, it will skip version-checking of hypervisors
during live migration.
"""),
+ cfg.BoolOpt(
+ 'skip_reserve_in_use_ironic_nodes',
+ default=False,
+ help="""
+This may be useful if you use the Ironic driver, but don't have
+automatic cleaning enabled in Ironic. Nova, by default, will mark
+Ironic nodes as reserved as soon as they are in use. When you free
+the Ironic node (by deleting the nova instance) it takes a while
+for Nova to un-reserve that Ironic node in placement. Usually this
+is a good idea, because it avoids placement providing an Ironic node
+as a valid candidate when it is still being cleaned.
+However, if you don't use automatic cleaning, it can cause an
+extra delay before an Ironic node is available for building a
+new Nova instance.
+"""),
]
diff --git a/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
new file mode 100644
index 0000000000..f4666a2b00
--- /dev/null
+++ b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""de-duplicate_indexes_in_instances__console_auth_tokens
+
+Revision ID: 960aac0e09ea
+Revises: ccb0fa1a2252
+Create Date: 2022-09-15 17:00:23.175991
+"""
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = '960aac0e09ea'
+down_revision = 'ccb0fa1a2252'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ with op.batch_alter_table('console_auth_tokens', schema=None) as batch_op:
+ batch_op.drop_index('console_auth_tokens_token_hash_idx')
+
+ with op.batch_alter_table('instances', schema=None) as batch_op:
+ batch_op.drop_index('uuid')
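# Illustrative check (not part of the migration): the dropped index is
# redundant because the remaining composite index on (token_hash,
# instance_uuid) still covers token_hash lookups. The engine URL below is
# hypothetical.
import sqlalchemy as sa

engine = sa.create_engine('sqlite:///nova.db')
names = {ix['name'] for ix in sa.inspect(engine).get_indexes('console_auth_tokens')}
assert 'console_auth_tokens_token_hash_idx' not in names
assert 'console_auth_tokens_token_hash_instance_uuid_idx' in names
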
diff --git a/nova/db/main/models.py b/nova/db/main/models.py
index 7551584c1c..f8363a89c0 100644
--- a/nova/db/main/models.py
+++ b/nova/db/main/models.py
@@ -266,7 +266,6 @@ class Instance(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
- sa.Index('uuid', 'uuid', unique=True),
sa.Index('instances_project_id_idx', 'project_id'),
sa.Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
@@ -1046,7 +1045,6 @@ class ConsoleAuthToken(BASE, NovaBase):
__table_args__ = (
sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
- sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
sa.Index(
'console_auth_tokens_token_hash_instance_uuid_idx', 'token_hash',
'instance_uuid',
diff --git a/nova/exception.py b/nova/exception.py
index 3d8e596312..0c0ffa85a1 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -217,6 +217,11 @@ class InvalidVIOMMUArchitecture(Invalid):
"but given architecture %(arch)s.")
+class InstanceQuiesceFailed(Invalid):
+ msg_fmt = _("Failed to quiesce instance: %(reason)s")
+ code = 409
+
+
class InvalidConfiguration(Invalid):
msg_fmt = _("Configuration is Invalid.")
@@ -1446,6 +1451,11 @@ class InstanceEvacuateNotSupported(Invalid):
msg_fmt = _('Instance evacuate is not supported.')
+class InstanceEvacuateNotSupportedTargetState(Invalid):
+ msg_fmt = _("Target state '%(target_state)s' for instance evacuate "
+ "is not supported.")
+
+
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
@@ -1474,6 +1484,11 @@ class UnsupportedRescueImage(Invalid):
msg_fmt = _("Requested rescue image '%(image)s' is not supported")
+class UnsupportedRPCVersion(Invalid):
+ msg_fmt = _("Unsupported RPC version for %(api)s. "
+ "Required >= %(required)s")
+
+
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
@@ -2491,3 +2506,18 @@ class PlacementPciMixedTraitsException(PlacementPciException):
class ReimageException(NovaException):
msg_fmt = _("Reimaging volume failed.")
+
+
+class InvalidNodeConfiguration(NovaException):
+ msg_fmt = _('Invalid node identity configuration: %(reason)s')
+
+
+class DuplicateRecord(NovaException):
+ msg_fmt = _('Unable to create duplicate record for %(target)s')
+
+
+class NotSupportedComputeForEvacuateV295(NotSupported):
+ msg_fmt = _("Starting with microversion 2.95, evacuate API will stop "
+ "instance on destination. To evacuate before upgrades are "
+ "complete please use an older microversion. Required version "
+                "for compute %(expected)s, current version %(currently)s")
diff --git a/nova/filesystem.py b/nova/filesystem.py
new file mode 100644
index 0000000000..5394d2d835
--- /dev/null
+++ b/nova/filesystem.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to address filesystem calls, particularly sysfs."""
+
+import os
+
+from oslo_log import log as logging
+
+from nova import exception
+
+LOG = logging.getLogger(__name__)
+
+
+SYS = '/sys'
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+def read_sys(path: str) -> str:
+ """Reads the content of a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :returns: contents of that file.
+ :raises: nova.exception.FileNotFound if we can't read that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='r') as data:
+ return data.read()
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+# In order to correctly use it, you need to decorate the caller with a specific
+# privsep entrypoint.
+def write_sys(path: str, data: str) -> None:
+ """Writes the content of a file in the sys filesystem with data.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :param data: the data to write.
+    :returns: None.
+ :raises: nova.exception.FileNotFound if we can't write that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='w') as fd:
+ fd.write(data)
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
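# Illustrative usage of the helpers above; the sysfs path is only an example
# and writes must be done by a suitably privileged (privsep-decorated) caller,
# as noted in the docstrings.
from nova import filesystem

online = filesystem.read_sys('devices/system/cpu/cpu1/online').strip()
if online == '1':
    # offline the core, e.g. as part of a CPU power management strategy
    filesystem.write_sys('devices/system/cpu/cpu1/online', '0')
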
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index cd393e7b33..704538250f 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -141,6 +141,8 @@ rwlock_re = re.compile(
r"(?P<module_part>(oslo_concurrency\.)?(lockutils|fasteners))"
r"\.ReaderWriterLock\(.*\)")
six_re = re.compile(r"^(import six(\..*)?|from six(\..*)? import .*)$")
+# Regex for catching the setDaemon method
+set_daemon_re = re.compile(r"\.setDaemon\(")
class BaseASTChecker(ast.NodeVisitor):
@@ -1078,3 +1080,22 @@ def import_stock_mock(logical_line):
"N371: You must explicitly import python's mock: "
"``from unittest import mock``"
)
+
+
+@core.flake8ext
+def check_set_daemon(logical_line):
+ """Check for use of the setDaemon method of the threading.Thread class
+
+ The setDaemon method of the threading.Thread class has been deprecated
+ since Python 3.10. Use the daemon attribute instead.
+
+ See
+ https://docs.python.org/3.10/library/threading.html#threading.Thread.setDaemon
+ for details.
+
+ N372
+ """
+ res = set_daemon_re.search(logical_line)
+ if res:
+ yield (0, "N372: Don't use the setDaemon method. "
+ "Use the daemon attribute instead.")
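# Illustrative example of what the N372 check flags and the replacement it
# suggests: set the daemon attribute (or pass daemon=True) instead of calling
# the deprecated setDaemon() method.
import threading


def worker():
    pass


# Flagged by N372:
#     t = threading.Thread(target=worker)
#     t.setDaemon(True)
# Preferred:
t = threading.Thread(target=worker, daemon=True)
t.start()
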
diff --git a/nova/manager.py b/nova/manager.py
index 9c00401b96..df03305367 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -103,12 +103,15 @@ class Manager(PeriodicTasks, metaclass=ManagerMeta):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
- def init_host(self):
+ def init_host(self, service_ref):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
- is created.
+ is created, but if one already exists for this service, it is
+ provided.
Child classes should override this method.
+
+ :param service_ref: An objects.Service if one exists, else None.
"""
pass
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 60c2be71cd..dfc1b2ae28 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -339,7 +340,12 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
- db_compute = db.compute_node_create(self._context, updates)
+ try:
+ db_compute = db.compute_node_create(self._context, updates)
+ except db_exc.DBDuplicateEntry:
+ target = 'compute node %s:%s' % (updates['hypervisor_hostname'],
+ updates['uuid'])
+ raise exception.DuplicateRecord(target=target)
self._from_db_object(self._context, self, db_compute)
@base.remotable
@@ -388,8 +394,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# The uuid field is read-only so it should only be set when
# creating the compute node record for the first time. Ignore
# it otherwise.
- if key == 'uuid' and 'uuid' in self:
- continue
+ if (key == 'uuid' and 'uuid' in self and
+ resources[key] != self.uuid):
+ raise exception.InvalidNodeConfiguration(
+ reason='Attempt to overwrite node %s with %s!' % (
+ self.uuid, resources[key]))
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
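# Illustrative sketch of the guard added above: once a compute node has a
# uuid, a refresh reporting a different uuid is treated as a node identity
# misconfiguration instead of being silently ignored. Plain Python stand-ins
# are used instead of the real nova objects and exceptions.
class NodeIdentityError(Exception):
    pass


def apply_resources(node, resources):
    for key, value in resources.items():
        if key == 'uuid' and getattr(node, 'uuid', None) not in (None, value):
            raise NodeIdentityError(
                'Attempt to overwrite node %s with %s' % (node.uuid, value))
        setattr(node, key, value)
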
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index c17c963e77..a4ca77edf6 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -14,12 +14,15 @@
import copy
import itertools
+import typing as ty
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
+from nova.compute import pci_placement_translator
+import nova.conf
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova import exception
@@ -28,6 +31,7 @@ from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
@@ -473,6 +477,113 @@ class RequestSpec(base.NovaObject):
filt_props['requested_destination'] = self.requested_destination
return filt_props
+ @staticmethod
+ def _rc_from_request(spec: ty.Dict[str, ty.Any]) -> str:
+ return pci_placement_translator.get_resource_class(
+ spec.get("resource_class"),
+ spec.get("vendor_id"),
+ spec.get("product_id"),
+ )
+
+ @staticmethod
+ def _traits_from_request(spec: ty.Dict[str, ty.Any]) -> ty.Set[str]:
+ return pci_placement_translator.get_traits(spec.get("traits", ""))
+
+ def generate_request_groups_from_pci_requests(self):
+ if not CONF.filter_scheduler.pci_in_placement:
+ return False
+
+ for pci_request in self.pci_requests.requests:
+ if pci_request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): Handle neutron based PCI requests here in a later
+ # cycle.
+ continue
+
+ if len(pci_request.spec) != 1:
+ # We are instantiating InstancePCIRequest objects with spec in
+ # two cases:
+ # 1) when a neutron port is translated to InstancePCIRequest
+ # object in
+ # nova.network.neutron.API.create_resource_requests
+ # 2) when the pci_passthrough:alias flavor extra_spec is
+ # translated to InstancePCIRequest objects in
+ # nova.pci.request._get_alias_from_config which enforces the
+ # json schema defined in nova.pci.request.
+ #
+ # In both cases only a single dict is added to the spec list.
+ # If we ever want to add support for multiple specs per request
+ # then we have to solve the issue that each spec can request a
+ # different resource class from placement. The only place in
+ # nova that currently handles multiple specs per request is
+ # nova.pci.utils.pci_device_prop_match() and it considers them
+ # as alternatives. So specs with different resource classes
+ # would mean alternative resource_class requests. This cannot
+ # be expressed today in the allocation_candidate query towards
+ # placement.
+ raise ValueError(
+ "PCI tracking in placement does not support multiple "
+ "specs per PCI request"
+ )
+
+ spec = pci_request.spec[0]
+
+ # The goal is to translate InstancePCIRequest to RequestGroup. Each
+ # InstancePCIRequest can be fulfilled from the whole RP tree. And
+ # a flavor based InstancePCIRequest might request more than one
+ # device (if count > 1) and those devices still need to be placed
+ # independently to RPs. So we could have two options to translate
+ # an InstancePCIRequest object to RequestGroup objects:
+        # 1) put all the requested resources from every
+ # InstancePCIRequest to the unsuffixed RequestGroup.
+ # 2) generate a separate RequestGroup for each individual device
+ # request
+ #
+ # While #1) feels simpler it has a big downside. The unsuffixed
+ # group will have a bulk request group resource provider mapping
+ # returned from placement. So there would be no easy way to later
+ # untangle which InstancePCIRequest is fulfilled by which RP, and
+ # therefore which PCI device should be used to allocate a specific
+ # device on the hypervisor during the PCI claim. Note that there
+ # could be multiple PF RPs providing the same type of resources but
+ # still we need to make sure that if a resource is allocated in
+ # placement from a specific RP (representing a physical device)
+ # then the PCI claim should consume resources from the same
+ # physical device.
+ #
+ # So we need at least a separate RequestGroup per
+        # InstancePCIRequest. However, for an InstancePCIRequest(count=2)
+        # that would mean a RequestGroup(RC:2) which would mean both
+        # resources should come from the same RP in placement. This is
+ # impossible for PF or PCI type requests and over restrictive for
+ # VF type requests. Therefore we need to generate one RequestGroup
+ # per requested device. So for InstancePCIRequest(count=2) we need
+ # to generate two separate RequestGroup(RC:1) objects.
+
+ # NOTE(gibi): If we have count=2 requests then the multiple
+ # RequestGroup split below only works if group_policy is set to
+ # none as group_policy=isolate would prevent allocating two VFs
+ # from the same PF. Fortunately
+ # nova.scheduler.utils.resources_from_request_spec() already
+ # defaults group_policy to none if it is not specified in the
+ # flavor and there are multiple RequestGroups in the RequestSpec.
+
+ for i in range(pci_request.count):
+ rg = objects.RequestGroup(
+ use_same_provider=True,
+ # we need to generate a unique ID for each group, so we use
+ # a counter
+ requester_id=f"{pci_request.request_id}-{i}",
+ # as we split count >= 2 requests to independent groups
+ # each group will have a resource request of one
+ resources={
+ self._rc_from_request(spec): 1
+ },
+ required_traits=self._traits_from_request(spec),
+ # TODO(gibi): later we can add support for complex trait
+ # queries here including forbidden_traits.
+ )
+ self.requested_resources.append(rg)
+
@classmethod
def from_components(
cls, context, instance_uuid, image, flavor,
@@ -539,6 +650,8 @@ class RequestSpec(base.NovaObject):
if port_resource_requests:
spec_obj.requested_resources.extend(port_resource_requests)
+ spec_obj.generate_request_groups_from_pci_requests()
+
# NOTE(gibi): later the scheduler adds more request level params but
# never overrides existing ones so we can initialize them here.
if request_level_params is None:
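# Illustrative sketch of the count -> request group split implemented above:
# an alias based PCI request with count=2 becomes two suffixed groups, each
# asking for one unit of the (possibly generated) resource class. Plain dicts
# stand in for the real InstancePCIRequest/RequestGroup objects.
pci_request = {
    'request_id': 'a3f1',
    'count': 2,
    'spec': [{'vendor_id': '8086', 'product_id': '1563'}],
}

request_groups = [
    {
        'requester_id': '%s-%d' % (pci_request['request_id'], i),
        'use_same_provider': True,
        'resources': {'CUSTOM_PCI_8086_1563': 1},
    }
    for i in range(pci_request['count'])
]
# Two groups, 'a3f1-0' and 'a3f1-1', so placement can pick an independent
# provider (device) for each requested unit.
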
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 71361e0168..b17b5c2050 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 64
+SERVICE_VERSION = 66
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -225,17 +225,42 @@ SERVICE_VERSION_HISTORY = (
# Version 64: Compute RPC v6.1:
# Add reimage_boot_volume parameter to rebuild_instance()
{'compute_rpc': '6.1'},
+ # Version 65: Compute RPC v6.1:
+ # Added stable local node identity
+ {'compute_rpc': '6.1'},
+ # Version 66: Compute RPC v6.2:
+ # Add target_state parameter to rebuild_instance()
+ {'compute_rpc': '6.2'},
)
-# This is used to raise an error at service startup if older than N-1 computes
-# are detected. Update this at the beginning of every release cycle to point to
-# the smallest service version that was added in N-1.
+# This is the version after which we can rely on having a persistent
+# local node identity for single-node systems.
+NODE_IDENTITY_VERSION = 65
+
+# This is used to raise an error at service startup if older than supported
+# computes are detected.
+# NOTE(sbauza): Please modify it this way:
+# * At the beginning of a non-SLURP release (eg. 2023.2 Bobcat) (or just after
+# the previous SLURP release RC1, like 2023.1 Antelope), please bump
+# OLDEST_SUPPORTED_SERVICE_VERSION to the previous SLURP release (in that
+# example, Antelope)
+# * At the beginning of a SLURP release (eg. 2024.1 C) (or just after the
+# previous non-SLURP release RC1, like 2023.2 Bobcat), please keep the
+# OLDEST_SUPPORTED_SERVICE_VERSION value using the previous SLURP release
+# (in that example, Antelope)
+# * At the end of any release (SLURP or non-SLURP), please modify
+# SERVICE_VERSION_ALIASES to add a key/value with key being the release name
+#   and value being the latest service version that the release supports (for
+#   example, before Bobcat RC1, please add 'Bobcat': XX where XX is the latest
+#   service version that was added)
OLDEST_SUPPORTED_SERVICE_VERSION = 'Yoga'
SERVICE_VERSION_ALIASES = {
'Victoria': 52,
'Wallaby': 54,
'Xena': 57,
'Yoga': 61,
+ 'Zed': 64,
+ 'Antelope': 66,
}
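# Illustrative sketch of how the alias table above can be consumed at startup:
# resolve the oldest supported release name to a concrete service version and
# reject older computes. This is a simplification, not the real nova check.
def assert_supported(compute_version, aliases, oldest_supported):
    minimum = aliases[oldest_supported]
    if compute_version < minimum:
        raise RuntimeError(
            'compute service version %d is older than the oldest supported '
            'version %d (%s)' % (compute_version, minimum, oldest_supported))


# e.g. assert_supported(64, {'Yoga': 61, 'Zed': 64}, 'Yoga') passes, while a
# compute reporting 57 would be rejected.
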
diff --git a/nova/pci/request.py b/nova/pci/request.py
index 38056d79b3..27ada6c045 100644
--- a/nova/pci/request.py
+++ b/nova/pci/request.py
@@ -106,6 +106,12 @@ _ALIAS_SCHEMA = {
"type": "string",
"enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
},
+ "resource_class": {
+ "type": "string",
+ },
+ "traits": {
+ "type": "string",
+ },
},
"required": ["name"],
}
@@ -114,7 +120,7 @@ _ALIAS_SCHEMA = {
def _get_alias_from_config() -> Alias:
"""Parse and validate PCI aliases from the nova config.
- :returns: A dictionary where the keys are device names and the values are
+ :returns: A dictionary where the keys are alias names and the values are
tuples of form ``(numa_policy, specs)``. ``numa_policy`` describes the
required NUMA affinity of the device(s), while ``specs`` is a list of
PCI device specs.
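# Illustrative validation of an alias that uses the new resource_class and
# traits keys; this uses a trimmed-down copy of the schema rather than the
# full _ALIAS_SCHEMA above.
import jsonschema

alias_schema = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'resource_class': {'type': 'string'},
        'traits': {'type': 'string'},
    },
    'required': ['name'],
}

alias = {
    'name': 'gpu',
    'resource_class': 'CUSTOM_GPU',
    'traits': 'CUSTOM_MODEL_A,CUSTOM_FAST',
}
jsonschema.validate(alias, alias_schema)
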
diff --git a/nova/pci/stats.py b/nova/pci/stats.py
index 3518b95289..5c5f7c669c 100644
--- a/nova/pci/stats.py
+++ b/nova/pci/stats.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
import copy
import typing as ty
@@ -64,6 +64,19 @@ class PciDeviceStats(object):
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
+ # these can be specified in the [pci]device_spec and can be requested via
+ # the PCI alias, but they are matched by the placement
+ # allocation_candidates query, so we can ignore them during pool creation
+ # and during filtering here
+ ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits']
+ # this is a metadata key in the spec that is matched
+ # specially in _filter_pools_based_on_placement_allocation. So we can
+    # ignore it in the general matching logic.
+ ignored_spec_tags += ['rp_uuids']
+ # this is a metadata key in the pool that is matched
+ # specially in _filter_pools_based_on_placement_allocation. So we can
+    # ignore it in the general matching logic.
+ ignored_pool_tags += ['rp_uuid']
def __init__(
self,
@@ -134,8 +147,22 @@ class PciDeviceStats(object):
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
+
if tags:
- pool.update(tags)
+ pool.update(
+ {
+ k: v
+ for k, v in tags.items()
+ if k not in self.ignored_pool_tags
+ }
+ )
+ # NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a
+ # single RP and the scheduler allocates from a specific RP we need
+ # to split the pools by PCI or PF address. We can still keep
+ # the VFs from the same parent PF in a single pool though as they
+ # are equivalent from placement perspective.
+ pool['address'] = dev.parent_addr or dev.address
+
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
# by the virt driver.
@@ -224,6 +251,17 @@ class PciDeviceStats(object):
free_devs.extend(pool['devices'])
return free_devs
+ def _allocate_devs(
+ self, pool: Pool, num: int, request_id: str
+ ) -> ty.List["objects.PciDevice"]:
+ alloc_devices = []
+ for _ in range(num):
+ pci_dev = pool['devices'].pop()
+ self._handle_device_dependents(pci_dev)
+ pci_dev.request_id = request_id
+ alloc_devices.append(pci_dev)
+ return alloc_devices
+
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
@@ -235,7 +273,10 @@ class PciDeviceStats(object):
for request in pci_requests:
count = request.count
- pools = self._filter_pools(self.pools, request, numa_cells)
+ rp_uuids = self._get_rp_uuids_for_request(
+ request=request, provider_mapping=None)
+ pools = self._filter_pools(
+ self.pools, request, numa_cells, rp_uuids=rp_uuids)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
@@ -251,20 +292,29 @@ class PciDeviceStats(object):
self.add_device(alloc_devices.pop())
raise exception.PciDeviceRequestFailed(requests=pci_requests)
- for pool in pools:
- if pool['count'] >= count:
- num_alloc = count
- else:
- num_alloc = pool['count']
- count -= num_alloc
- pool['count'] -= num_alloc
- for d in range(num_alloc):
- pci_dev = pool['devices'].pop()
- self._handle_device_dependents(pci_dev)
- pci_dev.request_id = request.request_id
- alloc_devices.append(pci_dev)
- if count == 0:
- break
+ if not rp_uuids:
+ # if there is no placement allocation then we are free to
+ # consume from the pools in any order:
+ for pool in pools:
+ if pool['count'] >= count:
+ num_alloc = count
+ else:
+ num_alloc = pool['count']
+ count -= num_alloc
+ pool['count'] -= num_alloc
+ alloc_devices += self._allocate_devs(
+ pool, num_alloc, request.request_id)
+ if count == 0:
+ break
+ else:
+ # but if there is placement allocation then we have to follow
+ # it
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ alloc_devices += self._allocate_devs(
+ pool, count, request.request_id)
return alloc_devices
@@ -313,7 +363,15 @@ class PciDeviceStats(object):
:returns: A list of pools that can be used to support the request if
this is possible.
"""
- request_specs = request.spec
+
+ def ignore_keys(spec):
+ return {
+ k: v
+ for k, v in spec.items()
+ if k not in self.ignored_spec_tags
+ }
+
+ request_specs = [ignore_keys(spec) for spec in request.spec]
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
@@ -510,11 +568,52 @@ class PciDeviceStats(object):
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
+ def _filter_pools_based_on_placement_allocation(
+ self,
+ pools: ty.List[Pool],
+ request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
+ ) -> ty.List[Pool]:
+ if not rp_uuids:
+ # If there is no placement allocation then we don't need to filter
+ # by it. This could happen if the instance only has neutron port
+ # based InstancePCIRequest as that is currently not having
+ # placement allocation (except for QoS ports, but that handled in a
+ # separate codepath) or if the [filter_scheduler]pci_in_placement
+ # configuration option is not enabled in the scheduler.
+ return pools
+
+ requested_dev_count_per_rp = collections.Counter(rp_uuids)
+ matching_pools = []
+ for pool in pools:
+ rp_uuid = pool.get('rp_uuid')
+ if rp_uuid is None:
+ # NOTE(gibi): As rp_uuids is not empty the scheduler allocated
+ # PCI resources on this host, so we know that
+ # [pci]report_in_placement is enabled on this host. But this
+ # pool has no RP mapping which can only happen if the pool
+ # contains PCI devices with physical_network tag, as those
+                # devices are not yet reported in placement. But if they are not
+ # reported then we can ignore them here too.
+ continue
+
+ if (
+ # the placement allocation contains this pool
+ rp_uuid in requested_dev_count_per_rp and
+ # the amount of dev allocated in placement can be consumed
+ # from the pool
+ pool["count"] >= requested_dev_count_per_rp[rp_uuid]
+ ):
+ matching_pools.append(pool)
+
+ return matching_pools
+
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
+ rp_uuids: ty.List[str],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
@@ -529,6 +628,9 @@ class PciDeviceStats(object):
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement. So here we have to consider only the pools matching
+            with these RP uuids
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
@@ -613,6 +715,19 @@ class PciDeviceStats(object):
before_count - after_count
)
+ # if there is placement allocation for the request then we have to
+ # remove the pools that are not in the placement allocation
+ before_count = after_count
+ pools = self._filter_pools_based_on_placement_allocation(
+ pools, request, rp_uuids)
+ after_count = sum([pool['count'] for pool in pools])
+ if after_count < before_count:
+ LOG.debug(
+ 'Dropped %d device(s) that are not part of the placement '
+ 'allocation',
+ before_count - after_count
+ )
+
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
@@ -622,6 +737,7 @@ class PciDeviceStats(object):
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
@@ -635,6 +751,12 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+            to a list of resource provider UUIDs which provide resources
+            for that RequestGroup. If it is None then it signals that the
+            InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
@@ -650,7 +772,7 @@ class PciDeviceStats(object):
# objects.
stats = copy.deepcopy(self)
try:
- stats.apply_requests(requests, numa_cells)
+ stats.apply_requests(requests, provider_mapping, numa_cells)
except exception.PciDeviceRequestFailed:
return False
@@ -660,6 +782,7 @@ class PciDeviceStats(object):
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
@@ -673,6 +796,8 @@ class PciDeviceStats(object):
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
@@ -682,22 +807,77 @@ class PciDeviceStats(object):
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
- filtered_pools = self._filter_pools(pools, request, numa_cells)
+ filtered_pools = self._filter_pools(
+ pools, request, numa_cells, rp_uuids)
if not filtered_pools:
return False
- count = request.count
- for pool in filtered_pools:
- count = self._decrease_pool_count(pools, pool, count)
- if not count:
- break
+ if not rp_uuids:
+ # If there is no placement allocation for this request then we are
+ # free to consume from the filtered pools in any order
+ count = request.count
+ for pool in filtered_pools:
+ count = self._decrease_pool_count(pools, pool, count)
+ if not count:
+ break
+ else:
+ # but if there is placement allocation then we have to follow that
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in filtered_pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ if pool['count'] == 0:
+ pools.remove(pool)
return True
+ def _get_rp_uuids_for_request(
+ self,
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
+ request: 'objects.InstancePCIRequest'
+ ) -> ty.List[str]:
+ """Return the list of RP uuids that are fulfilling the request.
+
+        An RP will appear in the list as many times as the number of devices
+        that need to be allocated from that RP.
+ """
+
+ if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): support neutron based requests in a later cycle
+ # an empty list will signal that any PCI pool can be used for this
+ # request
+ return []
+
+ if not provider_mapping:
+ # NOTE(gibi): AFAIK specs is always a list of a single dict
+ # but the object is hard to change retroactively
+ rp_uuids = request.spec[0].get('rp_uuids')
+ if not rp_uuids:
+ # This can happen if [filter_scheduler]pci_in_placement is not
+ # enabled yet
+ # An empty list will signal that any PCI pool can be used for
+ # this request
+ return []
+
+            # TODO(gibi): this is baaad but spec is a dict of strings so
+ # the list is serialized
+ return rp_uuids.split(',')
+
+ # NOTE(gibi): the PCI prefilter generates RequestGroup suffixes from
+ # InstancePCIRequests in the form of {request_id}-{count_index}
+        # NOTE(gibi): a suffixed request group is always fulfilled from a
+        # single RP
+ return [
+ rp_uuids[0]
+ for group_id, rp_uuids in provider_mapping.items()
+ if group_id.startswith(request.request_id)
+ ]
+
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
@@ -711,15 +891,23 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+            to a list of resource provider UUIDs which provide resources
+            for that RequestGroup. If it is None then it signals that the
+            InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
- if not all(
- self._apply_request(self.pools, r, numa_cells) for r in requests
- ):
- raise exception.PciDeviceRequestFailed(requests=requests)
+
+ for r in requests:
+ rp_uuids = self._get_rp_uuids_for_request(provider_mapping, r)
+
+ if not self._apply_request(self.pools, r, rp_uuids, numa_cells):
+ raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
@@ -757,3 +945,40 @@ class PciDeviceStats(object):
)
pools = self._filter_pools_for_spec(self.pools, dummy_req)
return bool(pools)
+
+ def populate_pools_metadata_from_assigned_devices(self):
+ """Populate the rp_uuid of each pool based on the rp_uuid of the
+ devices assigned to the pool. This can only be called from the compute
+ where devices are assigned to each pool. This should not be called from
+        the scheduler, as the device-to-pool assignment is not known there.
+ """
+ # PciDevices are tracked in placement and flavor based PCI requests
+ # are scheduled and allocated in placement. To be able to correlate
+ # what is allocated in placement and what is consumed in nova we
+ # need to map device pools to RPs. We can do that as the PciDevice
+ # contains the RP UUID that represents it in placement.
+ # NOTE(gibi): We cannot do this when the device is originally added to
+        # the pool as the device -> placement translation, which creates the
+        # RPs, runs after all the devices are created and assigned to pools.
+ for pool in self.pools:
+ pool_rps = {
+ dev.extra_info.get("rp_uuid")
+ for dev in pool["devices"]
+ if "rp_uuid" in dev.extra_info
+ }
+ if len(pool_rps) >= 2:
+                # FIXME(gibi): Do we have a 1:1 pool - RP mapping even
+                # if two PFs provide very similar VFs?
+                raise ValueError(
+                    "We have a pool %s connected to more than one RP %s in "
+ "placement via devs %s" % (pool, pool_rps, pool["devices"])
+ )
+
+            if not pool_rps:
+                # This can happen if the nova-compute is upgraded to have the
+                # PCI in placement inventory handling code but
+                # [pci]report_in_placement is not turned on yet.
+                continue
+
+            # at this point we know the pool maps to exactly one RP
+            pool['rp_uuid'] = next(iter(pool_rps))
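An illustrative sketch of the pool metadata population above, with plain dicts standing in for PciDevice objects and a made-up RP UUID; this only restates the logic of the new method, it is not part of the patch.

pool = {
    'devices': [
        {'extra_info': {'rp_uuid': 'rp-uuid-a'}},
        {'extra_info': {'rp_uuid': 'rp-uuid-a'}},
    ],
}
pool_rps = {
    dev['extra_info']['rp_uuid']
    for dev in pool['devices']
    if 'rp_uuid' in dev['extra_info']
}
assert pool_rps == {'rp-uuid-a'}
pool['rp_uuid'] = next(iter(pool_rps))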
diff --git a/nova/policies/tenant_networks.py b/nova/policies/tenant_networks.py
index ee5bd66cdf..79f8d21eaa 100644
--- a/nova/policies/tenant_networks.py
+++ b/nova/policies/tenant_networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
tenant_networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List project networks.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -52,7 +52,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show project network details.
This API is proxy calls to the Network service. This is deprecated.""",
diff --git a/nova/policy.py b/nova/policy.py
index 55455a9271..c66489cc8d 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -41,11 +41,15 @@ USER_BASED_RESOURCES = ['os-keypairs']
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(gmann): Remove overriding the default value of config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
diff --git a/nova/rpc.py b/nova/rpc.py
index a32b920e06..7a92650414 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -204,11 +204,9 @@ def get_client(target, version_cap=None, serializer=None,
else:
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(TRANSPORT, target,
+ version_cap=version_cap, serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def get_server(target, endpoints, serializer=None):
@@ -436,9 +434,9 @@ class ClientRouter(periodic_task.PeriodicTasks):
transport = context.mq_connection
if transport:
cmt = self.default_client.call_monitor_timeout
- return messaging.RPCClient(transport, self.target,
- version_cap=self.version_cap,
- serializer=self.serializer,
- call_monitor_timeout=cmt)
+ return messaging.get_rpc_client(transport, self.target,
+ version_cap=self.version_cap,
+ serializer=self.serializer,
+ call_monitor_timeout=cmt)
else:
return self.default_client
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 44f283f7ac..785a13279e 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -16,8 +16,12 @@
"""
Scheduler host filters
"""
+from oslo_log import log as logging
+
from nova import filters
+LOG = logging.getLogger(__name__)
+
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
@@ -28,6 +32,9 @@ class BaseHostFilter(filters.BaseFilter):
# other parameters. We care about running policy filters (i.e.
# ImagePropertiesFilter) but not things that check usage on the
# existing compute node, etc.
+    # This also means that filters marked with RUN_ON_REBUILD = True cannot
+    # filter on allocation candidates, or they need to handle the rebuild
+    # case specially.
RUN_ON_REBUILD = False
def _filter_one(self, obj, spec):
@@ -50,6 +57,43 @@ class BaseHostFilter(filters.BaseFilter):
raise NotImplementedError()
+class CandidateFilterMixin:
+    """Mixin that helps to implement a Filter that needs to filter hosts by
+    Placement allocation candidates.
+    """
+
+ def filter_candidates(self, host_state, filter_func):
+        """Check the still viable allocation candidates with filter_func and
+        keep only those that pass it.
+
+        :param host_state: HostState object holding the list of still viable
+            allocation candidates
+        :param filter_func: A callable that takes an allocation candidate and
+            returns a truthy value if the candidate passes the filter or a
+            falsy value if it does not.
+        """
+ good_candidates = []
+ for candidate in host_state.allocation_candidates:
+ LOG.debug(
+ f'{self.__class__.__name__} tries allocation candidate: '
+ f'{candidate}',
+ )
+ if filter_func(candidate):
+ LOG.debug(
+ f'{self.__class__.__name__} accepted allocation '
+ f'candidate: {candidate}',
+ )
+ good_candidates.append(candidate)
+ else:
+ LOG.debug(
+ f'{self.__class__.__name__} rejected allocation '
+ f'candidate: {candidate}',
+ )
+
+ host_state.allocation_candidates = good_candidates
+ return good_candidates
+
+
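A minimal usage sketch of the mixin above, assuming it can be instantiated on its own for demonstration and using a stand-in HostState with made-up candidate mappings; real filters (see the NUMATopologyFilter and PciPassthroughFilter hunks below) pass a filter_func that checks NUMA fitting or PCI support against each candidate's "mappings".

from nova.scheduler import filters


class FakeHostState:
    def __init__(self, candidates):
        self.allocation_candidates = candidates


host_state = FakeHostState([
    {'mappings': {'req-1-0': ['rp-uuid-a']}},
    {'mappings': {'req-1-0': ['rp-uuid-b']}},
])

mixin = filters.CandidateFilterMixin()
good = mixin.filter_candidates(
    host_state,
    # keep only candidates whose group maps to rp-uuid-a
    lambda candidate: candidate['mappings']['req-1-0'] == ['rp-uuid-a'],
)
# the surviving candidates are also written back to the host state
assert good == host_state.allocation_candidates
assert len(good) == 1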
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py
index 74d6012f82..ae50db90e5 100644
--- a/nova/scheduler/filters/numa_topology_filter.py
+++ b/nova/scheduler/filters/numa_topology_filter.py
@@ -20,7 +20,10 @@ from nova.virt import hardware
LOG = logging.getLogger(__name__)
-class NUMATopologyFilter(filters.BaseHostFilter):
+class NUMATopologyFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Filter on requested NUMA topology."""
# NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
@@ -97,12 +100,19 @@ class NUMATopologyFilter(filters.BaseHostFilter):
if network_metadata:
limits.network_metadata = network_metadata
- instance_topology = (hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limits,
- pci_requests=pci_requests,
- pci_stats=host_state.pci_stats))
- if not instance_topology:
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limits,
+ pci_requests=pci_requests,
+ pci_stats=host_state.pci_stats,
+ provider_mapping=candidate["mappings"],
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host)s, %(node)s fails NUMA topology "
"requirements. The instance does not fit on this "
"host.", {'host': host_state.host,
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index f08899586a..992879072a 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -20,7 +20,10 @@ from nova.scheduler import filters
LOG = logging.getLogger(__name__)
-class PciPassthroughFilter(filters.BaseHostFilter):
+class PciPassthroughFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Pci Passthrough Filter based on PCI request
Filter that schedules instances on a host if the host has devices
@@ -47,10 +50,24 @@ class PciPassthroughFilter(filters.BaseHostFilter):
pci_requests = spec_obj.pci_requests
if not pci_requests or not pci_requests.requests:
return True
- if (not host_state.pci_stats or
- not host_state.pci_stats.support_requests(pci_requests.requests)):
+
+ if not host_state.pci_stats:
+ LOG.debug("%(host_state)s doesn't have the required PCI devices"
+ " (%(requests)s)",
+ {'host_state': host_state, 'requests': pci_requests})
+ return False
+
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: host_state.pci_stats.support_requests(
+ pci_requests.requests, provider_mapping=candidate["mappings"]
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host_state)s doesn't have the required PCI devices"
" (%(requests)s)",
{'host_state': host_state, 'requests': pci_requests})
return False
+
return True
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 80511ffad6..8cb775a923 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -153,6 +153,8 @@ class HostState(object):
self.updated = None
+ self.allocation_candidates = []
+
def update(self, compute=None, service=None, aggregates=None,
inst_dict=None):
"""Update all information about a host."""
@@ -296,7 +298,9 @@ class HostState(object):
spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
self.numa_topology, spec_obj.numa_topology,
limits=self.limits.get('numa_topology'),
- pci_requests=pci_requests, pci_stats=self.pci_stats)
+ pci_requests=pci_requests,
+ pci_stats=self.pci_stats,
+ provider_mapping=spec_obj.get_request_group_mapping())
self.numa_topology = hardware.numa_usage_from_instance_numa(
self.numa_topology, spec_obj.numa_topology)
@@ -306,7 +310,11 @@ class HostState(object):
instance_cells = None
if spec_obj.numa_topology:
instance_cells = spec_obj.numa_topology.cells
- self.pci_stats.apply_requests(pci_requests, instance_cells)
+ self.pci_stats.apply_requests(
+ pci_requests,
+ spec_obj.get_request_group_mapping(),
+ instance_cells
+ )
# NOTE(sbauza): By considering all cases when the scheduler is called
# and when consume_from_request() is run, we can safely say that there
@@ -314,13 +322,21 @@ class HostState(object):
self.num_io_ops += 1
def __repr__(self):
- return ("(%(host)s, %(node)s) ram: %(free_ram)sMB "
- "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
- "instances: %(num_instances)s" %
- {'host': self.host, 'node': self.nodename,
- 'free_ram': self.free_ram_mb, 'free_disk': self.free_disk_mb,
- 'num_io_ops': self.num_io_ops,
- 'num_instances': self.num_instances})
+ return (
+ "(%(host)s, %(node)s) ram: %(free_ram)sMB "
+ "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
+ "instances: %(num_instances)s, "
+ "allocation_candidates: %(num_a_c)s"
+ % {
+ "host": self.host,
+ "node": self.nodename,
+ "free_ram": self.free_ram_mb,
+ "free_disk": self.free_disk_mb,
+ "num_io_ops": self.num_io_ops,
+ "num_instances": self.num_instances,
+ "num_a_c": len(self.allocation_candidates),
+ }
+ )
class HostManager(object):
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 10b330653d..11581c4f2d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -20,6 +20,7 @@ Scheduler Service
"""
import collections
+import copy
import random
from oslo_log import log as logging
@@ -299,12 +300,29 @@ class SchedulerManager(manager.Manager):
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
+ def hosts_with_alloc_reqs(hosts_gen):
+ """Extend the HostState objects returned by the generator with
+ the allocation requests of that host
+ """
+ for host in hosts_gen:
+ host.allocation_candidates = copy.deepcopy(
+ alloc_reqs_by_rp_uuid[host.uuid])
+ yield host
+
# Note: remember, we are using a generator-iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(
elevated, spec_obj, provider_summaries)
+        # alloc_reqs_by_rp_uuid is None during rebuild, so this means we
+        # cannot run filters that use allocation candidates during rebuild
+ if alloc_reqs_by_rp_uuid is not None:
+            # Wrap the generator to extend the HostState objects with the
+            # allocation requests for the given host. This is needed to
+            # support scheduler filters filtering on allocation candidates.
+ hosts = hosts_with_alloc_reqs(hosts)
+
# NOTE(sbauza): The RequestSpec.num_instances field contains the number
# of instances created when the RequestSpec was used to first boot some
# instances. This is incorrect when doing a move or resize operation,
@@ -332,6 +350,13 @@ class SchedulerManager(manager.Manager):
# the older dict format representing HostState objects.
# TODO(stephenfin): Remove this when we bump scheduler the RPC API
# version to 5.0
+ # NOTE(gibi): We cannot remove this branch as it is actively used
+ # when nova calls the scheduler during rebuild (not evacuate) to
+ # check if the current host is still good for the new image used
+ # for the rebuild. In this case placement cannot be used to
+ # generate candidates as that would require space on the current
+            # compute for a double allocation. So there are no allocation
+            # candidates for rebuild and therefore alloc_reqs_by_rp_uuid is None.
return self._legacy_find_hosts(
context, num_instances, spec_obj, hosts, num_alts,
instance_uuids=instance_uuids)
@@ -345,6 +370,9 @@ class SchedulerManager(manager.Manager):
# The list of hosts that have been selected (and claimed).
claimed_hosts = []
+ # The allocation request allocated on the given claimed host
+ claimed_alloc_reqs = []
+
for num, instance_uuid in enumerate(instance_uuids):
# In a multi-create request, the first request spec from the list
# is passed to the scheduler and that request spec's instance_uuid
@@ -371,21 +399,20 @@ class SchedulerManager(manager.Manager):
# resource provider UUID
claimed_host = None
for host in hosts:
- cn_uuid = host.uuid
- if cn_uuid not in alloc_reqs_by_rp_uuid:
- msg = ("A host state with uuid = '%s' that did not have a "
- "matching allocation_request was encountered while "
- "scheduling. This host was skipped.")
- LOG.debug(msg, cn_uuid)
+ if not host.allocation_candidates:
+ LOG.debug(
+                        "The nova scheduler removed every allocation "
+                        "candidate for host %s so this host was skipped.",
+ host
+ )
continue
- alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid]
# TODO(jaypipes): Loop through all allocation_requests instead
# of just trying the first one. For now, since we'll likely
# want to order the allocation_requests in the future based on
# information in the provider summaries, we'll just try to
# claim resources using the first allocation_request
- alloc_req = alloc_reqs[0]
+ alloc_req = host.allocation_candidates[0]
if utils.claim_resources(
elevated, self.placement_client, spec_obj, instance_uuid,
alloc_req,
@@ -405,6 +432,15 @@ class SchedulerManager(manager.Manager):
claimed_instance_uuids.append(instance_uuid)
claimed_hosts.append(claimed_host)
+ claimed_alloc_reqs.append(alloc_req)
+
+            # update the provider mapping in the request spec based
+            # on the allocated candidate as _consume_selected_host depends
+            # on this information to temporarily consume PCI devices
+            # tracked in placement
+ for request_group in spec_obj.requested_resources:
+ request_group.provider_uuids = alloc_req[
+ 'mappings'][request_group.requester_id]
# Now consume the resources so the filter/weights will change for
# the next instance.
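A plain-dict sketch of how the chosen candidate's "mappings" feed provider_uuids on each request group in the hunk above; the group ids and RP UUIDs are made up, and FakeRequestGroup is only a stand-in for the real RequestGroup object.

alloc_req = {
    'mappings': {'req-1-0': ['rp-uuid-a'], 'req-1-1': ['rp-uuid-b']},
}


class FakeRequestGroup:
    def __init__(self, requester_id):
        self.requester_id = requester_id
        self.provider_uuids = []


requested_resources = [
    FakeRequestGroup('req-1-0'), FakeRequestGroup('req-1-1')]
for request_group in requested_resources:
    request_group.provider_uuids = alloc_req['mappings'][
        request_group.requester_id]

assert requested_resources[0].provider_uuids == ['rp-uuid-a']
assert requested_resources[1].provider_uuids == ['rp-uuid-b']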
@@ -416,11 +452,19 @@ class SchedulerManager(manager.Manager):
self._ensure_sufficient_hosts(
context, claimed_hosts, num_instances, claimed_instance_uuids)
- # We have selected and claimed hosts for each instance. Now we need to
- # find alternates for each host.
+ # We have selected and claimed hosts for each instance along with a
+ # claimed allocation request. Now we need to find alternates for each
+ # host.
return self._get_alternate_hosts(
- claimed_hosts, spec_obj, hosts, num, num_alts,
- alloc_reqs_by_rp_uuid, allocation_request_version)
+ claimed_hosts,
+ spec_obj,
+ hosts,
+ num,
+ num_alts,
+ alloc_reqs_by_rp_uuid,
+ allocation_request_version,
+ claimed_alloc_reqs,
+ )
def _ensure_sufficient_hosts(
self, context, hosts, required_count, claimed_uuids=None,
@@ -532,7 +576,21 @@ class SchedulerManager(manager.Manager):
def _get_alternate_hosts(
self, selected_hosts, spec_obj, hosts, index, num_alts,
alloc_reqs_by_rp_uuid=None, allocation_request_version=None,
+ selected_alloc_reqs=None,
):
+ """Generate the main Selection and possible alternate Selection
+ objects for each "instance".
+
+ :param selected_hosts: This is a list of HostState objects. Each
+ HostState represents the main selection for a given instance being
+ scheduled (we can have multiple instances during multi create).
+        :param selected_alloc_reqs: This is a list of allocation requests that
+            are already allocated in placement for the main Selection for each
+            instance. This list matches selected_hosts by index: for the
+            first instance the selected host is selected_hosts[0] and the
+            already allocated placement candidate is
+            selected_alloc_reqs[0].
+ """
# We only need to filter/weigh the hosts again if we're dealing with
# more than one instance and are going to be picking alternates.
if index > 0 and num_alts > 0:
@@ -546,11 +604,10 @@ class SchedulerManager(manager.Manager):
# representing the selected host along with alternates from the same
# cell.
selections_to_return = []
- for selected_host in selected_hosts:
+ for i, selected_host in enumerate(selected_hosts):
# This is the list of hosts for one particular instance.
if alloc_reqs_by_rp_uuid:
- selected_alloc_req = alloc_reqs_by_rp_uuid.get(
- selected_host.uuid)[0]
+ selected_alloc_req = selected_alloc_reqs[i]
else:
selected_alloc_req = None
@@ -571,15 +628,17 @@ class SchedulerManager(manager.Manager):
if len(selected_plus_alts) >= num_alts + 1:
break
+ # TODO(gibi): In theory we could generate alternatives on the
+ # same host if that host has different possible allocation
+ # candidates for the request. But we don't do that today
if host.cell_uuid == cell_uuid and host not in selected_hosts:
if alloc_reqs_by_rp_uuid is not None:
- alt_uuid = host.uuid
- if alt_uuid not in alloc_reqs_by_rp_uuid:
+ if not host.allocation_candidates:
msg = ("A host state with uuid = '%s' that did "
- "not have a matching allocation_request "
+ "not have any remaining allocation_request "
"was encountered while scheduling. This "
"host was skipped.")
- LOG.debug(msg, alt_uuid)
+ LOG.debug(msg, host.uuid)
continue
# TODO(jaypipes): Loop through all allocation_requests
@@ -588,7 +647,13 @@ class SchedulerManager(manager.Manager):
# the future based on information in the provider
# summaries, we'll just try to claim resources using
# the first allocation_request
- alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0]
+ # NOTE(gibi): we are using, and re-using, allocation
+ # candidates for alternatives here. This is OK as
+ # these candidates are not yet allocated in placement
+ # and we don't know if an alternate will ever be used.
+                        # To increase our success rate we could try to use a
+                        # different candidate for each alternative though.
+ alloc_req = host.allocation_candidates[0]
alt_selection = objects.Selection.from_host_state(
host, alloc_req, allocation_request_version)
else:
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index c7e6ffed97..02c44093bd 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -1080,6 +1080,17 @@ _SUPPORTS_SOFT_AFFINITY = None
_SUPPORTS_SOFT_ANTI_AFFINITY = None
+def reset_globals():
+ global _SUPPORTS_AFFINITY
+ _SUPPORTS_AFFINITY = None
+ global _SUPPORTS_ANTI_AFFINITY
+ _SUPPORTS_ANTI_AFFINITY = None
+ global _SUPPORTS_SOFT_AFFINITY
+ _SUPPORTS_SOFT_AFFINITY = None
+ global _SUPPORTS_SOFT_ANTI_AFFINITY
+ _SUPPORTS_SOFT_ANTI_AFFINITY = None
+
+
def _get_group_details(context, instance_uuid, user_group_hosts=None):
"""Provide group_hosts and group_policies sets related to instances if
those instances are belonging to a group and if corresponding filters are
diff --git a/nova/service.py b/nova/service.py
index 2c10224926..bd3b49ae66 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -156,11 +156,11 @@ class Service(service.Service):
LOG.info('Starting %(topic)s node (version %(version)s)',
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
- self.manager.init_host()
- self.model_disconnected = False
ctxt = context.get_admin_context()
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
+ self.manager.init_host(self.service_ref)
+ self.model_disconnected = False
if self.service_ref:
_update_service_ref(self.service_ref)
diff --git a/nova/test.py b/nova/test.py
index 689d5ba291..e37967b06d 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -62,6 +62,7 @@ from nova import objects
from nova.objects import base as objects_base
from nova import quota
from nova.scheduler.client import report
+from nova.scheduler import utils as scheduler_utils
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
@@ -171,6 +172,12 @@ class TestCase(base.BaseTestCase):
# base class when USES_DB is True.
NUMBER_OF_CELLS = 1
+    # The stable compute id handling is intentionally singleton-ish, which
+    # makes it a nightmare to test multiple host/node combinations the way
+    # we do. So, mock it out by default, unless the test is specifically
+ # designed to handle it.
+ STUB_COMPUTE_ID = True
+
def setUp(self):
"""Run before each test method to initialize test environment."""
# Ensure BaseTestCase's ConfigureLogging fixture is disabled since
@@ -288,6 +295,10 @@ class TestCase(base.BaseTestCase):
self.useFixture(nova_fixtures.GenericPoisonFixture())
self.useFixture(nova_fixtures.SysFsPoisonFixture())
+ # Additional module names can be added to this set if needed
+ self.useFixture(nova_fixtures.ImportModulePoisonFixture(
+ set(['guestfs', 'libvirt'])))
+
# make sure that the wsgi app is fully initialized for all testcase
# instead of only once initialized for test worker
wsgi_app.init_global_data.reset()
@@ -295,6 +306,17 @@ class TestCase(base.BaseTestCase):
# Reset the placement client singleton
report.PLACEMENTCLIENT = None
+ # Reset our local node uuid cache (and avoid writing to the
+ # local filesystem when we generate a new one).
+ if self.STUB_COMPUTE_ID:
+ self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+
+ # Reset globals indicating affinity filter support. Some tests may set
+        # self.flags(enabled_filters=...) which could leave the affinity
+        # filter support globals set to a non-default configuration that
+        # affects all other tests.
+ scheduler_utils.reset_globals()
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -683,6 +705,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
raise NotImplementedError()
def setUp(self):
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
self.base = self._get_base_class()
super(SubclassSignatureTestCase, self).setUp()
diff --git a/nova/tests/fixtures/__init__.py b/nova/tests/fixtures/__init__.py
index df254608fd..9ff4a2a601 100644
--- a/nova/tests/fixtures/__init__.py
+++ b/nova/tests/fixtures/__init__.py
@@ -16,6 +16,8 @@ from .cast_as_call import CastAsCallFixture # noqa: F401
from .cinder import CinderFixture # noqa: F401
from .conf import ConfFixture # noqa: F401, F403
from .cyborg import CyborgFixture # noqa: F401
+from .filesystem import SysFileSystemFixture # noqa: F401
+from .filesystem import TempFileSystemFixture # noqa: F401
from .glance import GlanceFixture # noqa: F401
from .libvirt import LibvirtFixture # noqa: F401
from .libvirt_imagebackend import LibvirtImageBackendFixture # noqa: F401
diff --git a/nova/tests/fixtures/cinder.py b/nova/tests/fixtures/cinder.py
index 29889c784a..025a3d8b81 100644
--- a/nova/tests/fixtures/cinder.py
+++ b/nova/tests/fixtures/cinder.py
@@ -47,6 +47,13 @@ class CinderFixture(fixtures.Fixture):
# This represents a bootable image-backed volume to test
# boot-from-volume scenarios.
IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
+
+    # This represents a bootable image-backed volume used to test
+    # boot-from-volume scenarios with the os_require_quiesce and
+    # hw_qemu_guest_agent image properties set in the volume image
+    # metadata.
+ IMAGE_BACKED_VOL_QUIESCE = '6ca404f3-d844-4169-bb96-bc792f37de26'
+
# This represents a bootable image-backed volume with required traits
# as part of volume image metadata
IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'
@@ -157,6 +164,13 @@ class CinderFixture(fixtures.Fixture):
'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
}
+ if volume_id == self.IMAGE_BACKED_VOL_QUIESCE:
+ volume['bootable'] = True
+ volume['volume_image_metadata'] = {
+ "os_require_quiesce": "True",
+ "hw_qemu_guest_agent": "True"
+ }
+
if volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL:
volume['bootable'] = True
volume['volume_image_metadata'] = {
@@ -333,6 +347,10 @@ class CinderFixture(fixtures.Fixture):
if 'reimage_reserved' not in kwargs:
raise exception.InvalidInput('reimage_reserved not specified')
+ def fake_get_absolute_limits(_self, context):
+ limits = {'totalSnapshotsUsed': 0, 'maxTotalSnapshots': -1}
+ return limits
+
self.test.stub_out(
'nova.volume.cinder.API.attachment_create', fake_attachment_create)
self.test.stub_out(
@@ -375,6 +393,9 @@ class CinderFixture(fixtures.Fixture):
self.test.stub_out(
'nova.volume.cinder.API.reimage_volume',
fake_reimage_volume)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.get_absolute_limits',
+ fake_get_absolute_limits)
def volume_ids_for_instance(self, instance_uuid):
for volume_id, attachments in self.volume_to_attachment.items():
diff --git a/nova/tests/fixtures/filesystem.py b/nova/tests/fixtures/filesystem.py
new file mode 100644
index 0000000000..932d42fe27
--- /dev/null
+++ b/nova/tests/fixtures/filesystem.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import fixtures
+
+from nova import filesystem
+from nova.virt.libvirt.cpu import core
+
+
+SYS = 'sys'
+
+
+class TempFileSystemFixture(fixtures.Fixture):
+ """Creates a fake / filesystem"""
+
+ def _setUp(self):
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
+ # NOTE(sbauza): I/O disk errors may raise an exception here, as we
+ # don't ignore them. If that's causing a problem in our CI jobs, the
+ # recommended solution is to use shutil.rmtree instead of cleanup()
+        # with the ignore_errors parameter set to True (or wait for the
+        # minimum Python version to be 3.10, as TemporaryDirectory provides
+        # an ignore_cleanup_errors parameter there)
+ self.addCleanup(self.temp_dir.cleanup)
+
+
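A hedged sketch of the alternative cleanup suggested in the NOTE above, swapping TemporaryDirectory.cleanup() for shutil.rmtree with ignore_errors=True; this is only an illustration of the fallback, not part of the change.

import shutil
import tempfile

import fixtures


class TempFileSystemFixtureIgnoringErrors(fixtures.Fixture):
    """Variant that swallows I/O errors during cleanup."""

    def _setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
        # rmtree with ignore_errors=True instead of cleanup(), so a flaky
        # disk does not fail the test run during teardown
        self.addCleanup(
            shutil.rmtree, self.temp_dir.name, ignore_errors=True)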
+class SysFileSystemFixture(TempFileSystemFixture):
+ """Creates a fake /sys filesystem"""
+
+ def __init__(self, cpus_supported=None):
+ self.cpus_supported = cpus_supported or 10
+
+ def _setUp(self):
+ super()._setUp()
+ self.sys_path = os.path.join(self.temp_dir.name, SYS)
+ self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True)
+
+ sys_patcher = mock.patch(
+ 'nova.filesystem.SYS',
+ new_callable=mock.PropertyMock(return_value=self.sys_path))
+ self.sys_mock = sys_patcher.start()
+ self.addCleanup(sys_patcher.stop)
+
+ avail_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/present')))
+ self.avail_path_mock = avail_path_patcher.start()
+ self.addCleanup(avail_path_patcher.stop)
+
+ cpu_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/cpu%(core)s')))
+ self.cpu_path_mock = cpu_path_patcher.start()
+ self.addCleanup(cpu_path_patcher.stop)
+
+ for cpu_nr in range(self.cpus_supported):
+ cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
+ os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+ filesystem.write_sys(
+ os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+ data='powersave')
+ filesystem.write_sys(core.AVAILABLE_PATH,
+ f'0-{self.cpus_supported - 1}')
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 5d20e7d54f..4f48463118 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -2044,6 +2044,12 @@ class Connection(object):
return VIR_CPU_COMPARE_IDENTICAL
+ def compareHypervisorCPU(
+ self, emulator, arch, machine, virttype,
+ xml, flags
+ ):
+ return self.compareCPU(xml, flags)
+
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000,
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index ea32b6b34a..4ce3f03710 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -190,6 +190,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
+ # Set the SUPPORTS_LUKS member variable to mimic the Image base
+ # class.
+ image_init.SUPPORTS_LUKS = False
# Ditto for the 'is_shared_block_storage' and
# 'is_file_in_instance_path' functions
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 129b2f9abb..5fd893e7dc 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -20,8 +20,10 @@ import collections
import contextlib
from contextlib import contextmanager
import functools
+from importlib.abc import MetaPathFinder
import logging as std_logging
import os
+import sys
import time
from unittest import mock
import warnings
@@ -63,6 +65,7 @@ from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
+from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -563,11 +566,10 @@ class CellDatabases(fixtures.Fixture):
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
- return messaging.RPCClient(rpc.TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(rpc.TRANSPORT, target,
+ version_cap=version_cap,
+ serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
@@ -1798,3 +1800,70 @@ class SysFsPoisonFixture(fixtures.Fixture):
# a bunch of test to fail
# self.inject_poison("os.path", "exists")
# self.inject_poison("os", "stat")
+
+
+class ImportModulePoisonFixture(fixtures.Fixture):
+ """Poison imports of modules unsuitable for the test environment.
+
+ Examples are guestfs and libvirt. Ordinarily, these would not be installed
+ in the test environment but if they _are_ present, it can result in
+ actual calls to libvirt, for example, which could cause tests to fail.
+
+ This fixture will inspect module imports and if they are in the disallowed
+ list, it will fail the test with a helpful message about mocking needed in
+ the test.
+ """
+
+ class ForbiddenModules(MetaPathFinder):
+ def __init__(self, test, modules):
+ super().__init__()
+ self.test = test
+ self.modules = modules
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.modules:
+ self.test.fail_message = (
+ f"This test imports the '{fullname}' module, which it "
+ f'should not in the test environment. Please add '
+ f'appropriate mocking to this test.'
+ )
+ raise ImportError(fullname)
+
+ def __init__(self, module_names):
+ self.module_names = module_names
+ self.fail_message = ''
+ if isinstance(module_names, str):
+ self.module_names = {module_names}
+ self.meta_path_finder = self.ForbiddenModules(self, self.module_names)
+
+ def setUp(self):
+ super().setUp()
+ self.addCleanup(self.cleanup)
+ sys.meta_path.insert(0, self.meta_path_finder)
+
+ def cleanup(self):
+ sys.meta_path.remove(self.meta_path_finder)
+ # We use a flag and check it during the cleanup phase to fail the test
+        # if needed. This is done because some module imports occur inside a
+        # try-except block that ignores all exceptions, so raising an
+        # exception there (which is also what self.assert* and self.fail()
+        # do underneath) would not reliably cause the test to fail.
+ if self.fail_message:
+ raise ImportError(self.fail_message)
+
+
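An illustrative standalone sketch of the meta path mechanism the fixture above builds on (the module name is made up); the fixture itself records a failure message rather than relying on the raised ImportError, because some imports happen inside try-except blocks that swallow it.

import sys
from importlib.abc import MetaPathFinder


class Forbidden(MetaPathFinder):
    def __init__(self, names):
        self.names = names

    def find_spec(self, fullname, path, target=None):
        if fullname in self.names:
            raise ImportError(f'{fullname} is not allowed here')


finder = Forbidden({'some_forbidden_module'})
sys.meta_path.insert(0, finder)
try:
    import some_forbidden_module  # noqa: F401
except ImportError as exc:
    print(exc)  # "some_forbidden_module is not allowed here"
finally:
    sys.meta_path.remove(finder)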
+class ComputeNodeIdFixture(fixtures.Fixture):
+ def setUp(self):
+ super().setUp()
+
+ node.LOCAL_NODE_UUID = None
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ lambda: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ lambda uuid: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..486433733d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..3becc83fba
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "hostname": "%(hostname)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
new file mode 100644
index 0000000000..f83c78fdc9
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "%(user_data)s",
+ "networks": "auto",
+ "hostname": "custom-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
new file mode 100644
index 0000000000..ae2088619a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
new file mode 100644
index 0000000000..bc4be64a8e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "new-server-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
new file mode 100644
index 0000000000..2adc16df5e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..f49d21e7a2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/detail?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..9cdb3aa644
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index ab3aa95ad8..15efb39d44 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -80,7 +80,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -97,7 +97,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -119,7 +119,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -135,7 +135,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -163,7 +163,7 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -184,7 +184,7 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -211,8 +211,46 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
pass
+
+
+class EvacuateJsonTestV295(EvacuateJsonTestV268):
+ microversion = '2.95'
+ scenarios = [('v2_95', {'api_major_version': 'v2.1'})]
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index aa07b88247..7679c9b734 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -618,6 +618,13 @@ class ServersSampleJson290Test(ServersSampleJsonTest):
ADMIN_API = False
+class ServersSampleJson294Test(ServersSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ use_common_server_post = False
+ ADMIN_API = False
+
+
class ServersUpdateSampleJsonTest(ServersSampleBase):
# Many of the 'os_compute_api:servers:*' policies are admin-only, and we
@@ -702,6 +709,44 @@ class ServersUpdateSampleJson290Test(ServersUpdateSampleJsonTest):
self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+class ServersUpdateSampleJson294Test(ServersUpdateSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ ADMIN_API = False
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ subs['hostname'] = 'updated-hostname.example.com'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ params = {
+ 'uuid': self.glance.auto_disk_config_enabled_image['id'],
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ 'hostname': 'updated-hostname.example.com',
+ }
+
+ resp = self._do_post(
+ 'servers/%s/action' % uuid,
+ 'server-action-rebuild',
+ params,
+ )
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index fff08697ae..139fb5e6ac 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -248,6 +248,7 @@ class IronicResourceTrackerTest(test.TestCase):
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
+ 'uuid': str(getattr(uuids, nodename)),
}
self.rt.update_available_resource(self.ctx, nodename)
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 91d99d7ec8..cdf71da0d4 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -598,7 +598,7 @@ class InstanceHelperMixin:
def _evacuate_server(
self, server, extra_post_args=None, expected_host=None,
- expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_state='SHUTOFF', expected_task_state=NOT_SPECIFIED,
expected_migration_status='done'):
"""Evacuate a server."""
api = getattr(self, 'admin_api', self.api)
@@ -633,6 +633,13 @@ class InstanceHelperMixin:
return self._wait_for_state_change(server, 'SHUTOFF')
return server
+ def _snapshot_server(self, server, snapshot_name):
+ """Create server snapshot."""
+ self.api.post_server_action(
+ server['id'],
+ {'createImage': {'name': snapshot_name}}
+ )
+
class PlacementHelperMixin:
"""A helper mixin for interacting with placement."""
@@ -869,6 +876,20 @@ class PlacementHelperMixin:
'Test expected a single migration but found %i' % len(migrations))
return migrations[0].uuid
+ def _reserve_placement_resource(self, rp_name, rc_name, reserved):
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ inv = self.placement.get(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26'
+ ).body
+ inv["reserved"] = reserved
+ result = self.placement.put(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26', body=inv
+ ).body
+ self.assertEqual(reserved, result["reserved"])
+ return result
+
class PlacementInstanceHelperMixin(InstanceHelperMixin, PlacementHelperMixin):
"""A placement-aware variant of InstanceHelperMixin."""
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index 47a8bbe81c..7b6ee10631 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -18,6 +18,7 @@ import io
from unittest import mock
import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova.tests import fixtures as nova_fixtures
@@ -177,7 +178,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
- self.computes[hostname] = _start_compute(hostname, host_info)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ self.computes[hostname] = _start_compute(hostname, host_info)
+ # We need to trigger libvirt.Host() to capture the node-local
+ # uuid while we have it mocked out.
+ self.computes[hostname].driver._host.get_node_uuid()
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 9d3deec99d..92d7ffba29 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -415,7 +415,9 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
with mock.patch.object(fakelibvirt.Connection, 'getHostname',
return_value=name):
- compute = self.start_service('compute', host=name)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % name))
+ compute = self.start_service('compute', host=name)
compute.driver._host.get_connection().getHostname = lambda: name
return compute
diff --git a/nova/tests/functional/libvirt/test_pci_in_placement.py b/nova/tests/functional/libvirt/test_pci_in_placement.py
index 32f6cfeca7..41d6c8e008 100644
--- a/nova/tests/functional/libvirt/test_pci_in_placement.py
+++ b/nova/tests/functional/libvirt/test_pci_in_placement.py
@@ -13,6 +13,7 @@
# under the License.
from unittest import mock
+import ddt
import fixtures
import os_resource_classes
import os_traits
@@ -73,10 +74,6 @@ class PlacementPCIReportingTests(test_pci_sriov_servers._PCIServersTestBase):
)
)
- @staticmethod
- def _to_device_spec_conf(spec_list):
- return [jsonutils.dumps(x) for x in spec_list]
-
class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
@@ -91,7 +88,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=2, num_pfs=2, num_vfs=4)
# the emulated devices will then be filtered by the device_spec:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PCI_PROD_ID will match two type-PCI devs (slot 0, 1)
{
@@ -168,7 +165,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=1, num_vfs=1)
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PCI_PROD_ID will match the type-PCI in slot 0
{
@@ -215,7 +212,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# both device will be matched by our config
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PF
{
@@ -248,7 +245,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches the two VFs separately and tries to configure
# them with different resource class
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.1",
@@ -282,7 +279,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches the two VFs separately and tries to configure
# them with different trait list
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.1",
@@ -316,7 +313,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# then the config assigns physnet to the dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -336,7 +333,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
)
def test_devname_based_dev_spec_rejected(self):
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"devname": "eth0",
@@ -364,7 +361,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches that PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -386,7 +383,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
)
# now un-configure the PCI device and restart the compute
- self.flags(group='pci', device_spec=self._to_device_spec_conf([]))
+ self.flags(group='pci', device_spec=self._to_list_of_json_str([]))
self.restart_compute_service(hostname="compute1")
# the RP had no allocation so nova could remove it
@@ -402,7 +399,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config matching the VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -450,7 +447,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config patches the VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -493,7 +490,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config matches both VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -516,7 +513,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# change the config to match the PF but do not match the VFs and
# restart the compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -548,7 +545,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config only matches the PF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -571,7 +568,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# remove the PF from the config and add the VFs instead then restart
# the compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -604,7 +601,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=2, num_vfs=4)
# from slot 0 we match the PF only and ignore the VFs
# from slot 1 we match the VFs but ignore the parent PF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -649,7 +646,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# change the resource class and traits configuration and restart the
# compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"product_id": fakelibvirt.PF_PROD_ID,
@@ -702,7 +699,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# we match the PF only and ignore the VF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -756,7 +753,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
self._create_one_compute_with_a_pf_consumed_by_an_instance())
# remove 0000:81:00.0 from the device spec and restart the compute
- device_spec = self._to_device_spec_conf([])
+ device_spec = self._to_list_of_json_str([])
self.flags(group='pci', device_spec=device_spec)
# The PF is used but removed from the config. The PciTracker warns
# but keeps the device so the placement logic mimics this and only warns
@@ -800,7 +797,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# in the config, then restart the compute service
# only match the VF now
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -875,7 +872,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -902,7 +899,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -953,25 +950,13 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
)
)
- @staticmethod
- def _move_allocation(allocations, from_uuid, to_uuid):
- allocations[to_uuid] = allocations[from_uuid]
- del allocations[from_uuid]
-
- def _move_server_allocation(self, allocations, server_uuid, revert=False):
- migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
- if revert:
- self._move_allocation(allocations, migration_uuid, server_uuid)
- else:
- self._move_allocation(allocations, server_uuid, migration_uuid)
-
def test_heal_single_pci_allocation(self):
# The fake libvirt will emulate on the host:
# * one type-PCI in slot 0
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1027,7 +1012,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=2, num_vfs=8)
# the config matches:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# both type-PCI
{
@@ -1127,7 +1112,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=2, num_vfs=8)
# the config matches:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# both type-PCI
{
@@ -1216,7 +1201,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1273,7 +1258,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# the config matches just the VFs
- compute2_device_spec = self._to_device_spec_conf(
+ compute2_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1377,7 +1362,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1435,7 +1420,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=1, num_vfs=1)
# the config matches the PCI devs and the PF but not the VFs
- compute2_device_spec = self._to_device_spec_conf(
+ compute2_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1516,7 +1501,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=3)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1613,8 +1598,400 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_expected_placement_view["allocations"][server["id"]] = {
"0000:81:00.0": {self.VF_RC: 2}
}
+ # NOTE(gibi): This is unfortunate, but during a same host resize
+ # confirm, when the PCI scheduling is not enabled, the healing logic
+ # cannot heal the dest host allocation during the claim. It will only
+ # heal it in the next run of ResourceTracker._update(). This is due
+ # to the fact that ResourceTracker.drop_move_claim runs both for
+ # revert (on the dest) and for confirm (on the source), and in a same
+ # host resize this means that it runs on both the source and the dest
+ # as they are the same.
+ # Anyhow, the healing will happen just a bit later. And the end goal
+ # is to enable the scheduler support by default and delete the whole
+ # healing logic. So I think this is acceptable.
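+ # Hence the explicit periodic run below before asserting the placement
+ # view.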
+ self._run_periodics()
self.assert_placement_pci_view(
"compute1", **compute1_expected_placement_view)
- self._run_periodics()
+
+
+@ddt.ddt
+class SimpleRCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "gpu",
+ "traits": ",".join(
+ [
+ os_traits.HW_GPU_API_VULKAN,
+ "purple",
+ "round",
+ ]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_GPU": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_PURPLE",
+ "CUSTOM_ROUND",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_GPU": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view)
+
+ @ddt.data(
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-gpu-wrong-rc",
+ },
+ {
+ "resource_class": os_resource_classes.PGPU,
+ "name": "a-gpu-wrong-rc-2",
+ },
+ {
+ "resource_class": "GPU",
+ # NOTE(gibi): "big" is missing from device spec
+ "traits": "purple,big",
+ "name": "a-gpu-missing-trait",
+ },
+ )
+ def test_boot_with_custom_rc_and_traits_no_matching_device(
+ self, pci_alias
+ ):
+ self.flags(group="pci", alias=self._to_list_of_json_str([pci_alias]))
+ extra_spec = {"pci_passthrough:alias": f"{pci_alias['name']}:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state="ERROR"
+ )
+ self.assertIn("fault", server)
+ self.assertIn("No valid host", server["fault"]["message"])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+
+ def test_boot_with_custom_rc_and_traits_succeeds(self):
+ pci_alias_gpu = {
+ "resource_class": "GPU",
+ "traits": "HW_GPU_API_VULKAN,PURPLE",
+ "name": "a-gpu",
+ }
+ self.flags(
+ group="pci", alias=self._to_list_of_json_str([pci_alias_gpu])
+ )
+
+ extra_spec = {"pci_passthrough:alias": "a-gpu:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ self.compute1_expected_placement_view["usages"]["0000:81:00.0"][
+ "CUSTOM_GPU"
+ ] = 1
+ self.compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {"CUSTOM_GPU": 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+ self.assert_no_pci_healing("compute1")
+
+
+class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_device_claim_consistent_with_placement_allocation(self):
+ """As soon as [filter_scheduler]pci_in_placement is enabled the
+ nova-scheduler will allocate PCI devices in placement. Then on the
+ nova-compute side the PCI claim will also allocate PCI devices in the
+ nova DB. This test will create a situation where the two allocation
+ could contradict and observes that in a contradicting situation the PCI
+ claim will fail instead of allocating a device that is not allocated in
+ placement.
+
+ For the contradiction to happen we need two PCI devices that looks
+ different from placement perspective than from the nova DB perspective.
+
+ We can do that by assigning different traits from in placement and
+ having different product_id in the Nova DB. Then we will create a
+ request that would match from placement perspective to one of the
+ device only and would match to the other device from nova DB
+ perspective. Then we will expect that the boot request fails with no
+ valid host.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ # * one type-PF in slot 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=0)
+ # we allow both devices to be consumed, but we assign different traits
+ # so we can selectively schedule to one of the devices in placement
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PCI",
+ },
+ {
+ "address": "0000:81:01.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PF",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 1},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_A_PCI",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_A_PF",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 0},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # now we create a PCI alias that cannot be fulfilled from both
+ # nova and placement perspective at the same time, but can be fulfilled
+ # from each perspective individually
+ pci_alias_no_match = {
+ "resource_class": "MY_DEV",
+ # by product_id this matches 81.00 only
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ # by trait this matches 81.01 only
+ "traits": "A_PF",
+ "name": "a-pci",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_no_match]),
+ )
+
+ # then try to boot with the alias and expect no valid host error
+ extra_spec = {"pci_passthrough:alias": "a-pci:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_vf_with_split_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # reserve VFs from 81.01 in placement to drive the first instance to
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 2)
+ # boot an instance with a single VF
+ # we expect that it is allocated from 81.00 as both VFs on 81.01 are
+ # reserved
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_1vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=3)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1}
+ }
+ compute1_expected_placement_view["allocations"][server_1vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ # Boot a second instance requesting two VFs and ensure that the only
+ # way that placement allows this is to split the two VFs between PFs.
+ # Let's remove the reservation of one resource from 81.01 so the only
+ # viable placement candidate is: one VF from 81.00 and one VF from
+ # 81.01
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 1)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ # both VMs use one VF
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ compute1_expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_3vfs_asymmetric_split_between_pfs(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # Boot an instance requesting three VFs. The 3 VFs can be split between
+ # the two PFs in two ways: 2 from 81.00 and 1 from 81.01, or 1 from 81.00
+ # and 2 from 81.01.
+ # Let's block the first way in placement by reserving 1 device from
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:00.0", "CUSTOM_MY_VF", 1)
+ extra_spec = {"pci_passthrough:alias": "a-vf:3"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # We expect this to fit.
+ server_3vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ compute1_expected_placement_view["allocations"][server_3vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
self.assert_placement_pci_view(
"compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index 072eb2c212..135a457154 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -14,6 +14,8 @@
# under the License.
import copy
+import pprint
+import typing as ty
from unittest import mock
from urllib import parse as urlparse
@@ -27,6 +29,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
import nova
+from nova.compute import pci_placement_translator
from nova import context
from nova import exception
from nova.network import constants
@@ -42,10 +45,58 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+class PciPlacementHealingFixture(fixtures.Fixture):
+ """Allow asserting if the pci_placement_translator module needed to
+ heal PCI allocations. Such healing is only normal during upgrade. After
+ every compute is upgraded and the scheduling support of PCI tracking in
+ placement is enabled there should be no need to heal PCI allocations in
+ the resource tracker. We assert this as we eventually want to remove the
+ automatic healing logic from the resource tracker.
+ """
+
+ def __init__(self):
+ super().__init__()
+ # a list of (nodename, result, allocation_before, allocation_after)
+ # tuples recording the result of the calls to
+ # update_provider_tree_for_pci
+ self.calls = []
+
+ def setUp(self):
+ super().setUp()
+
+ orig = pci_placement_translator.update_provider_tree_for_pci
+
+ def wrapped_update(
+ provider_tree, nodename, pci_tracker, allocations, same_host
+ ):
+ alloc_before = copy.deepcopy(allocations)
+ updated = orig(
+ provider_tree, nodename, pci_tracker, allocations, same_host)
+ alloc_after = copy.deepcopy(allocations)
+ self.calls.append((nodename, updated, alloc_before, alloc_after))
+ return updated
+
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ "nova.compute.pci_placement_translator."
+ "update_provider_tree_for_pci",
+ wrapped_update,
+ )
+ )
+
+ def last_healing(self, hostname: str) -> ty.Optional[ty.Tuple[dict, dict]]:
+ for h, updated, before, after in self.calls:
+ if h == hostname and updated:
+ return before, after
+ return None
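+
+ # A condensed sketch of how this fixture is consumed (see
+ # _PCIServersTestBase below): setUp() installs it and
+ # assert_no_pci_healing() fails the test if last_healing() reports that
+ # the resource tracker had to heal an allocation:
+ #
+ #   self.pci_healing_fixture = self.useFixture(
+ #       PciPlacementHealingFixture())
+ #   ...
+ #   self.assertIsNone(self.pci_healing_fixture.last_healing(hostname))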
+
+
class _PCIServersTestBase(base.ServersTestBase):
ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+
def setUp(self):
self.ctxt = context.get_admin_context()
self.flags(
@@ -66,6 +117,9 @@ class _PCIServersTestBase(base.ServersTestBase):
'.PciPassthroughFilter.host_passes',
side_effect=host_pass_mock)).mock
+ self.pci_healing_fixture = self.useFixture(
+ PciPlacementHealingFixture())
+
def assertPCIDeviceCounts(self, hostname, total, free):
"""Ensure $hostname has $total devices, $free of which are free."""
devices = objects.PciDeviceList.get_by_compute_node(
@@ -75,21 +129,31 @@ class _PCIServersTestBase(base.ServersTestBase):
self.assertEqual(total, len(devices))
self.assertEqual(free, len([d for d in devices if d.is_available()]))
+ def assert_no_pci_healing(self, hostname):
+ last_healing = self.pci_healing_fixture.last_healing(hostname)
+ before = last_healing[0] if last_healing else None
+ after = last_healing[1] if last_healing else None
+ self.assertIsNone(
+ last_healing,
+ "The resource tracker needed to heal PCI allocation in placement "
+ "on host %s. This should not happen in normal operation as the "
+ "scheduler should create the proper allocation instead.\n"
+ "Allocations before healing:\n %s\n"
+ "Allocations after healing:\n %s\n"
+ % (
+ hostname,
+ pprint.pformat(before),
+ pprint.pformat(after),
+ ),
+ )
+
def _get_rp_by_name(self, name, rps):
for rp in rps:
if rp["name"] == name:
return rp
self.fail(f'RP {name} is not found in Placement {rps}')
- def assert_placement_pci_view(
- self, hostname, inventories, traits, usages=None, allocations=None
- ):
- if not usages:
- usages = {}
-
- if not allocations:
- allocations = {}
-
+ def assert_placement_pci_inventory(self, hostname, inventories, traits):
compute_rp_uuid = self.compute_rp_uuids[hostname]
rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
@@ -129,6 +193,10 @@ class _PCIServersTestBase(base.ServersTestBase):
f"Traits on RP {real_rp_name} does not match with expectation"
)
+ def assert_placement_pci_usages(self, hostname, usages):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
for rp_name, usage in usages.items():
real_rp_name = f'{hostname}_{rp_name}'
rp = self._get_rp_by_name(real_rp_name, rps)
@@ -139,6 +207,38 @@ class _PCIServersTestBase(base.ServersTestBase):
f"Usage on RP {real_rp_name} does not match with expectation"
)
+ def assert_placement_pci_allocations(self, allocations):
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ len(actual_allocations),
+ f"The consumer {consumer} allocates from different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ self.assertIn(
+ rp_uuid,
+ actual_allocations,
+ f"The consumer {consumer} expected to allocate from "
+ f"{rp_name}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp_uuid]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+ f"The consumer {consumer} expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_allocations_on_host(self, hostname, allocations):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
for consumer, expected_allocations in allocations.items():
actual_allocations = self._get_allocations_by_server_uuid(consumer)
self.assertEqual(
@@ -170,6 +270,35 @@ class _PCIServersTestBase(base.ServersTestBase):
f"{actual_rp_allocs} instead."
)
+ def assert_placement_pci_view(
+ self, hostname, inventories, traits, usages=None, allocations=None
+ ):
+ if not usages:
+ usages = {}
+
+ if not allocations:
+ allocations = {}
+
+ self.assert_placement_pci_inventory(hostname, inventories, traits)
+ self.assert_placement_pci_usages(hostname, usages)
+ self.assert_placement_pci_allocations_on_host(hostname, allocations)
+
+ @staticmethod
+ def _to_list_of_json_str(list):
+ return [jsonutils.dumps(x) for x in list]
+
+ @staticmethod
+ def _move_allocation(allocations, from_uuid, to_uuid):
+ allocations[to_uuid] = allocations[from_uuid]
+ del allocations[from_uuid]
+
+ def _move_server_allocation(self, allocations, server_uuid, revert=False):
+ migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
+ if revert:
+ self._move_allocation(allocations, migration_uuid, server_uuid)
+ else:
+ self._move_allocation(allocations, server_uuid, migration_uuid)
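+
+ # Background for the two helpers above (matching the assertions in the
+ # resize and cold-migrate tests below): during a move operation
+ # placement transfers the source-host PCI allocation from the instance
+ # UUID to the migration UUID and transfers it back on revert, so the
+ # tests update their expected-view dicts the same way.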
+
class _PCIServersWithMigrationTestBase(_PCIServersTestBase):
@@ -809,7 +938,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# start two compute services with differing PCI device inventory
source_pci_info = fakelibvirt.HostPCIDevicesInfo(
- num_pfs=2, num_vfs=8, numa_node=0)
+ num_pfs=1, num_vfs=4, numa_node=0)
# add an extra PF without VF to be used by direct-physical ports
source_pci_info.add_device(
dev_type='PF',
@@ -862,7 +991,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# our source host should have marked two PCI devices as used, the VF
# and the parent PF, while the future destination is currently unused
self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
- self.assertPCIDeviceCounts('test_compute0', total=11, free=8)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=3)
self.assertPCIDeviceCounts('test_compute1', total=4, free=4)
# the instance should be on host NUMA node 0, since that's where our
@@ -886,7 +1015,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# TODO(stephenfin): Stop relying on a side-effect of how nova
# chooses from multiple PCI devices (apparently the last
# matching one)
- 'pci_slot': '0000:81:01.4',
+ 'pci_slot': '0000:81:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
@@ -910,7 +1039,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# we should now have transitioned our usage to the destination, freeing
# up the source in the process
- self.assertPCIDeviceCounts('test_compute0', total=11, free=11)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=6)
self.assertPCIDeviceCounts('test_compute1', total=4, free=1)
# the instance should now be on host NUMA node 1, since that's where
@@ -1812,15 +1941,16 @@ class PCIServersTest(_PCIServersTestBase):
'name': ALIAS_NAME,
}
)]
- PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
def setUp(self):
super().setUp()
self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
def test_create_server_with_pci_dev_and_numa(self):
"""Verifies that an instance can be booted with cpu pinning and with an
- assigned pci device.
+ assigned pci device with legacy policy and numa info for the pci
+ device.
"""
self.flags(cpu_dedicated_set='0-7', group='compute')
@@ -1832,6 +1962,7 @@ class PCIServersTest(_PCIServersTestBase):
"compute1",
inventories={"0000:81:00.0": {self.PCI_RC: 1}},
traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 0}},
)
# create a flavor
@@ -1841,23 +1972,34 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(flavor_id=flavor_id, networks='none')
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 1}},
+ allocations={server['id']: {"0000:81:00.0": {self.PCI_RC: 1}}},
+ )
+ self.assert_no_pci_healing("compute1")
def test_create_server_with_pci_dev_and_numa_fails(self):
"""This test ensures that it is not possible to allocated CPU and
- memory resources from one NUMA node and a PCI device from another.
+ memory resources from one NUMA node and a PCI device from another
+ if we use the legacy policy and the pci device reports numa info.
"""
-
self.flags(cpu_dedicated_set='0-7', group='compute')
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
self.assert_placement_pci_view(
- "compute1",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {'hw:cpu_policy': 'dedicated'}
@@ -1870,6 +2012,10 @@ class PCIServersTest(_PCIServersTestBase):
self._create_server(
flavor_id=flavor_id, networks='none', expected_state='ERROR')
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
def test_live_migrate_server_with_pci(self):
"""Live migrate an instance with a PCI passthrough device.
@@ -1882,26 +2028,41 @@ class PCIServersTest(_PCIServersTestBase):
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
self.assert_placement_pci_view(
- "test_compute0",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "test_compute0", **test_compute0_placement_pci_view)
self.start_compute(
hostname='test_compute1',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
self.assert_placement_pci_view(
- "test_compute1",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "test_compute1", **test_compute1_placement_pci_view)
# create a server
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute0")
+
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now live migrate that server
ex = self.assertRaises(
@@ -1913,29 +2074,51 @@ class PCIServersTest(_PCIServersTestBase):
# this will bubble to the API
self.assertEqual(500, ex.response.status_code)
self.assertIn('NoValidHost', str(ex))
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_resize_pci_to_vanilla(self):
# Start two computes, one with PCI and one without.
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
self.assert_placement_pci_view(
- "test_compute0",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
self.assert_placement_pci_view(
- "test_compute1",
- inventories={},
- traits={},
- )
+ "test_compute1", **test_compute1_placement_pci_view)
# Boot a server with a single PCI device.
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# Resize it to a flavor without PCI devices. We expect this to work, as
# test_compute1 is available.
flavor_id = self._create_flavor()
@@ -1948,6 +2131,343 @@ class PCIServersTest(_PCIServersTestBase):
self._confirm_resize(server)
self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_vanilla_to_pci(self):
+ """Resize an instance from a non PCI flavor to a PCI flavor"""
+ # Start two computes, one with PCI and one without.
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Boot a server without a PCI device and make sure it lands on the
+ # compute that has no device, so we can later resize it to the other
+ # host, which has a PCI device.
+ extra_spec = {}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute1")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Resize it to a flavor with a PCI device. We expect this to work, as
+ # test_compute0 is available and has a PCI device.
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_from_one_dev_to_two(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=2),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize the server to a flavor requesting two devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the source host the PCI allocation is now held by the migration
+ self._move_server_allocation(
+ test_compute0_placement_pci_view['allocations'], server['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # on the dest we now have two devices allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now revert the resize
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the source host the allocation should move back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # so the dest should be freed
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ del test_compute1_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now resize again and confirm it
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ # the source host now needs to be freed up
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # and dest allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_same_host_resize_with_pci(self):
+ """Start a single compute with 3 PCI devs and resize and instance
+ from one dev to two devs
+ """
+ self.flags(allow_resize_to_same_host=True)
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # Boot a server with a single PCI device.
+ # To stabilize the test we reserve 81.01 and 81.02 in placement so
+ # we can be sure that the instance will use 81.00; otherwise the
+ # allocation would be random among 00, 01, and 02.
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 1)
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # remove the reservations, so we can resize on the same host and
+ # consume 01 and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 0)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 0)
+
+ # Resize the server to use 2 PCI devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=0)
+ # the source host side of the allocation is now held by the migration
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server['id'])
+ # but we have the dest host side of the allocations on the same host
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # revert the resize so the instance goes back to using a single
+ # device
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ # the migration allocation is moved back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ # and the "dest" side of the allocation is dropped
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize again but now confirm the same host resize and assert that
+ # only the new flavor usage remains
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {self.PCI_RC: 1}
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_no_pci_healing("test_compute0")
def _confirm_resize(self, server, host='host1'):
# NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
@@ -1962,7 +2482,6 @@ class PCIServersTest(_PCIServersTestBase):
self.flags(host=orig_host)
def test_cold_migrate_server_with_pci(self):
-
host_devices = {}
orig_create = nova.virt.libvirt.guest.Guest.create
@@ -1991,17 +2510,41 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
self.start_compute(hostname=hostname, pci_info=pci_info)
- self.assert_placement_pci_view(
- hostname,
- inventories={
- "0000:81:00.0": {self.PCI_RC: 1},
- "0000:81:01.0": {self.PCI_RC: 1},
- },
- traits={
- "0000:81:00.0": [],
- "0000:81:01.0": [],
- },
- )
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# boot an instance with a PCI device on each host
extra_spec = {
@@ -2009,8 +2552,16 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # force the allocation on test_compute0 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
server_a = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute0')
+ # force the allocation on test_compute1 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 1)
server_b = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute1')
@@ -2022,6 +2573,25 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
self.assertPCIDeviceCounts(hostname, total=2, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_b['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # remove the resource reservation from test_compute1 to be able to
+ # migrate server_a there
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 0)
+
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
@@ -2039,13 +2609,41 @@ class PCIServersTest(_PCIServersTestBase):
server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
)
self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
+ # on the source host the allocation is now held by the migration UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server_a['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ # server_a now has an allocation on test_compute1 on 81:01
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:01.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:01.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now, confirm the migration and check our counts once again
self._confirm_resize(server_a)
self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
+ # the source host now has no allocations as the migration allocation
+ # is removed by confirm resize
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
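+ # assert_no_pci_healing presumably re-runs the resource tracker
+ # periodic and verifies that it did not have to heal any PCI
+ # allocation in placement, i.e. everything stayed in sync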
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_request_two_pci_but_host_has_one(self):
# simulate a single type-PCI device on the host
@@ -2076,6 +2674,320 @@ class PCIServersTest(_PCIServersTestBase):
self.assertIn('fault', server)
self.assertIn('No valid host', server['fault']['message'])
+ def _create_two_computes(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ return (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def _create_two_computes_and_an_instance_on_the_first(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ return (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def test_evacuate(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # kill test_compute0 and evacuate the instance
+ self.computes['test_compute0'].stop()
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"forced_down": True},
+ )
+ self._evacuate_server(server)
+ # the source allocation is kept because the source host is dead, but
+ # the server now has allocations on both hosts since evacuation does
+ # not use migration allocations.
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assert_placement_pci_inventory(
+ "test_compute0",
+ test_compute0_placement_pci_view["inventories"],
+ test_compute0_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute0", test_compute0_placement_pci_view["usages"]
+ )
+ self.assert_placement_pci_allocations(
+ {
+ server['id']: {
+ "test_compute0": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute0_0000:81:00.0": {self.PCI_RC: 1},
+ "test_compute1": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute1_0000:81:00.0": {self.PCI_RC: 1},
+ },
+ }
+ )
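+ # (the PCI RPs are named <hostname>_<PCI address> by the
+ # PCI-in-placement reporting and both allocations belong to the same
+ # consumer, the server itself)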
+
+ # dest allocation should be created
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_inventory(
+ "test_compute1",
+ test_compute1_placement_pci_view["inventories"],
+ test_compute1_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute1", test_compute1_placement_pci_view["usages"]
+ )
+
+ # recover test_compute0 and check that it is cleaned
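+ # (restarting the compute runs init_host, which is expected to remove
+ # the evacuated instance from the source and free both the PCI claim
+ # and its placement allocation)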
+ self.restart_compute_service('test_compute0')
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # and test_compute1 is not changed (except that the instance now has
+ # its allocation only on this compute)
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_unshelve_after_offload(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # shelve offload the server
+ self._shelve_server(server)
+
+ # source allocation should be freed
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should not be touched
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # disable test_compute0 and unshelve the instance
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"status": "disabled"},
+ )
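+ # with test_compute0 disabled the scheduler can only pick
+ # test_compute1 as the unshelve target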
+ self._unshelve_server(server)
+
+ # test_compute0 should be unchanged
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should be allocated
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_reschedule(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # try to boot a VM with a single device but inject fault on the first
+ # compute so that the VM is re-scheduled to the other
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+
+ calls = []
+ orig_guest_create = (
+ nova.virt.libvirt.driver.LibvirtDriver._create_guest)
+
+ def fake_guest_create(*args, **kwargs):
+ if not calls:
+ calls.append(1)
+ raise fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ )
+ else:
+ return orig_guest_create(*args, **kwargs)
+
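+ # the injected libvirtError makes the first boot attempt fail in a
+ # way that is presumably treated as reschedulable, so the request
+ # lands on the other compute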
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._create_guest',
+ new=fake_guest_create
+ ):
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none')
+
+ compute_pci_view_map = {
+ 'test_compute0': test_compute0_placement_pci_view,
+ 'test_compute1': test_compute1_placement_pci_view,
+ }
+ allocated_compute = server['OS-EXT-SRV-ATTR:host']
+ not_allocated_compute = (
+ "test_compute0"
+ if allocated_compute == "test_compute1"
+ else "test_compute1"
+ )
+
+ allocated_pci_view = compute_pci_view_map.pop(
+ server['OS-EXT-SRV-ATTR:host'])
+ not_allocated_pci_view = list(compute_pci_view_map.values())[0]
+
+ self.assertPCIDeviceCounts(allocated_compute, total=1, free=0)
+ allocated_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ allocated_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(allocated_compute, **allocated_pci_view)
+
+ self.assertPCIDeviceCounts(not_allocated_compute, total=1, free=1)
+ self.assert_placement_pci_view(
+ not_allocated_compute, **not_allocated_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_multi_create(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ body = self._build_server(flavor_id=pci_flavor_id, networks='none')
+ body.update(
+ {
+ "min_count": "2",
+ }
+ )
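+ # min_count=2 requests a multi-create, i.e. two servers from a
+ # single API call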
+ self.api.post_server({'server': body})
+
+ servers = self.api.get_servers(detail=False)
+ for server in servers:
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ self.assertEqual(2, len(servers))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ # we have no way to influence which instance takes which device, so
+ # we need to look at the nova DB to properly assert the placement
+ # allocation
+ devices = objects.PciDeviceList.get_by_compute_node(
+ self.ctxt,
+ objects.ComputeNode.get_by_nodename(self.ctxt, 'test_compute0').id,
+ )
+ for dev in devices:
+ if dev.instance_uuid:
+ test_compute0_placement_pci_view["usages"][
+ dev.address][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ dev.instance_uuid] = {dev.address: {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
@@ -2097,6 +3009,11 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
)]
expected_state = 'ACTIVE'
+ def setUp(self):
+ super().setUp()
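+ # report PCI inventories to placement and let the scheduler consume
+ # them, so the placement PCI view assertions in these tests apply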
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Validate behavior of 'preferred' PCI NUMA policy.
@@ -2109,6 +3026,20 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {
@@ -2117,13 +3048,26 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
self._create_server(flavor_id=flavor_id)
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# now boot one with a PCI device, which should succeed thanks to the
# use of the PCI policy
extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(
+ server_with_pci = self._create_server(
flavor_id=flavor_id, expected_state=self.expected_state)
+ if self.expected_state == 'ACTIVE':
+ compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ compute1_placement_pci_view["allocations"][
+ server_with_pci['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
@@ -2139,6 +3083,99 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
)]
expected_state = 'ERROR'
+ def setUp(self):
+ super().setUp()
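+ # is_physical_function is presumably stubbed so the fake devices are
+ # treated as plain type-PCI devices rather than PFs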
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.is_physical_function', return_value=False
+ )
+ )
+
+ def test_create_server_with_pci_dev_and_numa_placement_conflict(self):
+ # fakelibvirt will simulate the devices:
+ # * one type-PCI in 81.00 on numa 0
+ # * one type-PCI in 81.01 on numa 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the device_spec will assign different traits to 81.00 than 81.01
+ # so the two devices become different from placement perspective
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": "green",
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:01.0",
+ "traits": "red",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # both numa 0 and numa 1 have 4 PCPUs
+ self.flags(cpu_dedicated_set='0-7', group='compute')
+ self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": ["CUSTOM_GREEN"],
+ "0000:81:01.0": ["CUSTOM_RED"],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
+ # boot one instance with no PCI device to "fill up" NUMA node 0
+ # so we will have PCPUs on numa 0 and we have PCI on both nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ }
+ flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+
+ pci_alias = {
+ "resource_class": self.PCI_RC,
+ # this means only 81.00 will match in placement which is on numa 0
+ "traits": "green",
+ "name": "pci-dev",
+ # this forces the scheduler to only accept a solution where the
+ # PCI device is on the same numa node as the pinned CPUs
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias]),
+ )
+
+ # Ask for dedicated CPUs, that can only be fulfilled on numa 1.
+ # And ask for a PCI alias that can only be fulfilled on numa 0 due to
+ # trait request.
+ # We expect that this makes the scheduling fail.
+ extra_spec = {
+ "hw:cpu_policy": "dedicated",
+ "pci_passthrough:alias": "pci-dev:1",
+ }
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, expected_state="ERROR")
+
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
@ddt.ddt
class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
diff --git a/nova/tests/functional/libvirt/test_power_manage.py b/nova/tests/functional/libvirt/test_power_manage.py
new file mode 100644
index 0000000000..9f80446bd6
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_power_manage.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import fixtures
+
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import base
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import api as cpu_api
+
+
+class PowerManagementTestsBase(base.ServersTestBase):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter']
+
+ ADMIN_API = True
+
+ def setUp(self):
+ super(PowerManagementTestsBase, self).setUp()
+
+ self.ctxt = nova_context.get_admin_context()
+
+ # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect
+ # this
+ host_manager = self.scheduler.manager.host_manager
+ numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
+ host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
+ _p = mock.patch('nova.scheduler.filters'
+ '.numa_topology_filter.NUMATopologyFilter.host_passes',
+ side_effect=host_pass_mock)
+ self.mock_filter = _p.start()
+ self.addCleanup(_p.stop)
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.useFixture(nova_fixtures.PrivsepFixture())
+
+ # Defining the main flavor for 4 vCPUs all pinned
+ self.extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_thread_policy': 'prefer',
+ }
+ self.pcpu_flavor_id = self._create_flavor(
+ vcpu=4, extra_spec=self.extra_spec)
+
+ def _assert_server_cpus_state(self, server, expected='online'):
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ if not inst.numa_topology:
+ self.fail('Instance should have a NUMA topology in order to know '
+ 'its physical CPUs')
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ self._assert_cpu_set_state(instance_pcpus, expected=expected)
+ return instance_pcpus
+
+ def _assert_cpu_set_state(self, cpu_set, expected='online'):
+ for i in cpu_set:
+ core = cpu_api.Core(i)
+ if expected == 'online':
+ self.assertTrue(core.online, f'{i} is not online')
+ elif expected == 'offline':
+ self.assertFalse(core.online, f'{i} is online')
+ elif expected == 'powersave':
+ self.assertEqual('powersave', core.governor)
+ elif expected == 'performance':
+ self.assertEqual('performance', core.governor)
+
+
+class PowerManagementTests(PowerManagementTestsBase):
+ """Test suite for a single host with 9 dedicated cores and 1 used for OS"""
+
+ def setUp(self):
+ super(PowerManagementTests, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
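+ # With cpu_power_management enabled the driver is expected to power
+ # down every dedicated core that is not pinned to an instance.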
+ # All cores are shut down at startup, let's check.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ def test_hardstop_compute_service_if_wrong_opt(self):
+ self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
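+ # enabling cpu_power_management without a cpu_dedicated_set is an
+ # invalid combination, so the compute service must refuse to start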
+ self.assertRaises(exception.InvalidConfiguration,
+ self.start_compute, host_info=self.host_info,
+ hostname='compute2')
+
+ def test_create_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Let's verify that the pinned CPUs are now online
+ self._assert_server_cpus_state(server, expected='online')
+
+ # Verify that the unused CPUs are still offline
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ unused_cpus = cpu_dedicated_set - instance_pcpus
+ self._assert_cpu_set_state(unused_cpus, expected='offline')
+
+ def test_stop_start_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+
+ server = self._stop_server(server)
+ # Let's verify that the pinned CPUs are now stopped...
+ self._assert_server_cpus_state(server, expected='offline')
+
+ server = self._start_server(server)
+ # ...and now, they should be back.
+ self._assert_server_cpus_state(server, expected='online')
+
+ def test_resize(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ server_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+
+ new_flavor_id = self._create_flavor(
+ vcpu=5, extra_spec=self.extra_spec)
+ self._resize_server(server, new_flavor_id)
+ server2_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+ # Even if the resize is not confirmed yet, the original guest is now
+ # destroyed so the cores are now offline.
+ self._assert_cpu_set_state(server_pcpus, expected='offline')
+
+ # let's revert the resize
+ self._revert_resize(server)
+ # So now the original CPUs will be online again, while the cores
+ # pinned by the (now reverted) resized guest should be back offline.
+ self._assert_cpu_set_state(server_pcpus, expected='online')
+ self._assert_cpu_set_state(server2_pcpus, expected='offline')
+
+ def test_changing_strategy_fails(self):
+ # As a reminder, all cores have been shutdown before.
+ # Now we want to change the strategy and then we restart the service
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ # See, this is not possible as we would have offline CPUs.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementTestsGovernor(PowerManagementTestsBase):
+ """Test suite for specific governor usage (same 10-core host)"""
+
+ def setUp(self):
+ super(PowerManagementTestsGovernor, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ def test_create(self):
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ # With the governor strategy, cores are still online but run with a
+ # powersave governor.
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='powersave')
+
+ # Now, start an instance
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # When pinned cores are in use, their governor is switched to performance
+ self._assert_server_cpus_state(server, expected='performance')
+
+ def test_changing_strategy_fails(self):
+ # Arbitrarily set one core's governor to performance
+ cpu_api.Core(1).set_high_governor()
+ # and then forget about it while changing the strategy.
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ # This time, this wouldn't be acceptable as some cores would keep a
+ # different governor while Nova would only online/offline them.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementMixedInstances(PowerManagementTestsBase):
+ """Test suite for a single host with 6 dedicated cores, 3 shared and one
+ OS-restricted.
+ """
+
+ def setUp(self):
+ super(PowerManagementMixedInstances, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining 6 CPUs to be dedicated, not all of them in a series.
+ self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # Make sure only 6 are offline now
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
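+ # only the dedicated cores are power-managed; the shared set and the
+ # OS-reserved core are presumably left online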
+ # cores 4 and 8-9 should be online
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ def test_standard_server_works_and_passes(self):
+
+ std_flavor_id = self._create_flavor(vcpu=2)
+ self._create_server(flavor_id=std_flavor_id, expected_state='ACTIVE')
+
+ # Since this is an instance with floating vCPUs on the shared set, we
+ # can only lookup the host CPUs and see they haven't changed state.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ # We can now try to boot an instance with pinned CPUs to test the mix
+ pinned_server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # We'll see that its CPUs are now online
+ self._assert_server_cpus_state(pinned_server, expected='online')
+ # but it doesn't change the shared set
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index d1cad0e376..1200f80357 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -12,9 +12,11 @@
# under the License.
import fixtures
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.tests import fixtures as nova_fixtures
@@ -99,7 +101,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
- compute = self._start_compute(host=hostname)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ compute = self._start_compute(host=hostname)
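+ # (the node UUID is stubbed per hostname above, presumably so each
+ # fake compute keeps its own stable node identity)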
# Ensure populating the existing pmems correctly.
vpmems = compute.driver._vpmems_by_name
diff --git a/nova/tests/functional/notification_sample_tests/test_compute_task.py b/nova/tests/functional/notification_sample_tests/test_compute_task.py
index 3de1c7d4e1..05d2d32fde 100644
--- a/nova/tests/functional/notification_sample_tests/test_compute_task.py
+++ b/nova/tests/functional/notification_sample_tests/test_compute_task.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova import objects
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
@@ -53,6 +56,10 @@ class TestComputeTaskNotificationSample(
},
actual=self.notifier.versioned_notifications[1])
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index f671a8abca..5a52c2dad6 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -46,18 +46,18 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.compute2 = self.start_service('compute', host='host2')
actions = [
- self._test_live_migration_rollback,
- self._test_live_migration_abort,
- self._test_live_migration_success,
- self._test_evacuate_server,
- self._test_live_migration_force_complete
+ (self._test_live_migration_rollback, 'ACTIVE'),
+ (self._test_live_migration_abort, 'ACTIVE'),
+ (self._test_live_migration_success, 'ACTIVE'),
+ (self._test_evacuate_server, 'SHUTOFF'),
+ (self._test_live_migration_force_complete, 'ACTIVE'),
]
- for action in actions:
+ for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
- self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@@ -275,6 +275,12 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
+ # In this scenario the evacuation that ran just before has stopped
+ # the server.
+ self._start_server(server)
+ self._wait_for_state_change(server, 'ACTIVE')
+ self.notifier.reset()
+
post = {
'os-migrateLive': {
'host': 'host2',
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 6180dbfbaa..b20e1530cc 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -59,7 +59,8 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Now try to evacuate the server back to the original source compute.
server = self._evacuate_server(
server, {'onSharedStorage': 'False'},
- expected_host=self.compute.host, expected_migration_status='done')
+ expected_host=self.compute.host, expected_migration_status='done',
+ expected_state='ACTIVE')
# Assert the RequestSpec.ignore_hosts field is not populated.
reqspec = objects.RequestSpec.get_by_instance_uuid(
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 9a6a79d7a2..8088ccfe06 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -13,9 +13,11 @@
# limitations under the License.
import time
+from unittest import mock
from oslo_log import log as logging
+from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -81,6 +83,10 @@ class FailedEvacuateStateTests(test.TestCase,
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(created_server, 'ACTIVE')
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_evacuate_no_valid_host(self):
# Boot a server
server = self._boot_a_server()
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index aa86770584..59bbed4f46 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -95,7 +95,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Evacuate the instance from the source_host
server = self._evacuate_server(
- server, expected_migration_status='done')
+ server, expected_migration_status='done',
+ expected_state='ACTIVE')
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index 5e69905f5f..af134070cd 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -66,4 +66,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# higher than host3.
self._evacuate_server(
server, {'onSharedStorage': 'False'}, expected_host='host3',
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index dc74791e0e..3cfece8d36 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -216,7 +216,7 @@ class TestEvacuateResourceTrackerRace(
self._run_periodics()
self._wait_for_server_parameter(
- server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
+ server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'SHUTOFF'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1922053.py b/nova/tests/functional/regressions/test_bug_1922053.py
index 612be27b2b..70bb3d4cab 100644
--- a/nova/tests/functional/regressions/test_bug_1922053.py
+++ b/nova/tests/functional/regressions/test_bug_1922053.py
@@ -1,3 +1,4 @@
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -27,6 +28,7 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
ADMIN_API = True
microversion = 'latest'
+ expected_state = 'SHUTOFF'
def _create_test_server(self, compute_host):
return self._create_server(host=compute_host, networks='none')
@@ -59,7 +61,8 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
server = self._evacuate_server(
server,
expected_host='compute2',
- expected_migration_status='done'
+ expected_migration_status='done',
+ expected_state=self.expected_state
)
# Assert that the request to force up the host is rejected
@@ -97,6 +100,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""
microversion = '2.52'
+ expected_state = 'ACTIVE'
def _create_test_server(self, compute_host):
return self._create_server(az='nova:compute', networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1951656.py b/nova/tests/functional/regressions/test_bug_1951656.py
new file mode 100644
index 0000000000..d705ff6fe3
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1951656.py
@@ -0,0 +1,73 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_vgpu
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class VGPUTestsLibvirt7_7(test_vgpu.VGPUTestBase):
+
+ def _create_mdev(self, physical_device, mdev_type, uuid=None):
+ # We need to fake the newly created sysfs object by adding a new
+ # FakeMdevDevice to the existing persisted Connection object so
+ # that when asking for the existing mdevs, we see it.
+ if not uuid:
+ uuid = uuidutils.generate_uuid()
+ mdev_name = libvirt_utils.mdev_uuid2name(uuid)
+ libvirt_parent = self.pci2libvirt_address(physical_device)
+
+ # Libvirt 7.7 now creates mdevs with a parent_addr suffix.
+ new_mdev_name = '_'.join([mdev_name, libvirt_parent])
+
+ # Here, we get the right compute thanks by the self.current_host that
+ # was modified just before
+ connection = self.computes[
+ self._current_host].driver._host.get_connection()
+ connection.mdev_info.devices.update(
+ {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=new_mdev_name,
+ type_id=mdev_type,
+ parent=libvirt_parent)})
+ return uuid
+
+ def setUp(self):
+ super(VGPUTestsLibvirt7_7, self).setUp()
+ extra_spec = {"resources:VGPU": "1"}
+ self.flavor = self._create_flavor(extra_spec=extra_spec)
+
+ # Start compute1 supporting only nvidia-11
+ self.flags(
+ enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+ group='devices')
+
+ self.compute1 = self.start_compute_with_vgpu('host1')
+
+ def test_create_servers_with_vgpu(self):
+
+ # Create a single instance against a specific compute node.
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=1)
+
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=2)
diff --git a/nova/tests/functional/regressions/test_bug_1980720.py b/nova/tests/functional/regressions/test_bug_1980720.py
new file mode 100644
index 0000000000..ad2e6e6ba2
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1980720.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2022 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+from unittest import mock
+
+
+class LibvirtDriverTests(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
+ def setUp(self):
+ super(LibvirtDriverTests, self).setUp()
+ self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
+ self.start_compute()
+
+ def _create_server_with_block_device(self):
+ server_request = self._build_server(
+ networks=[],
+ )
+ # removing imageRef is required as we want
+ # to boot from volume
+ server_request.pop('imageRef')
+ server_request['block_device_mapping_v2'] = [{
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL_QUIESCE,
+ 'destination_type': 'volume'}]
+
+ server = self.api.post_server({
+ 'server': server_request,
+ })
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server
+
+ def test_snapshot_quiesce_fail(self):
+ server = self._create_server_with_block_device()
+ with mock.patch.object(
+ nova_fixtures.libvirt.Domain, 'fsFreeze'
+ ) as mock_obj:
+ ex = nova_fixtures.libvirt.libvirtError("Error")
+ ex.err = (nova_fixtures.libvirt.VIR_ERR_AGENT_UNRESPONSIVE,)
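+ # an unresponsive guest agent during quiesce should surface to the
+ # API caller as a 409 Conflict rather than a 500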
+
+ mock_obj.side_effect = ex
+ excep = self.assertRaises(
+ client.OpenStackApiException,
+ self._snapshot_server, server, "snapshot-1"
+ )
+ self.assertEqual(409, excep.response.status_code)
diff --git a/nova/tests/functional/test_instance_actions.py b/nova/tests/functional/test_instance_actions.py
index c20b053459..060133ce93 100644
--- a/nova/tests/functional/test_instance_actions.py
+++ b/nova/tests/functional/test_instance_actions.py
@@ -59,6 +59,15 @@ class InstanceActionsTestV221(InstanceActionsTestV21):
self.assertEqual('delete', actions[0]['action'])
self.assertEqual('create', actions[1]['action'])
+ def test_get_instance_actions_shelve_deleted(self):
+ server = self._create_server()
+ self._shelve_server(server)
+ self._delete_server(server)
+ actions = self.api.get_instance_actions(server['id'])
+ self.assertEqual('delete', actions[0]['action'])
+ self.assertEqual('shelve', actions[1]['action'])
+ self.assertEqual('create', actions[2]['action'])
+
class HypervisorError(Exception):
"""This is just used to make sure the exception type is in the events."""
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 7cbe8bdb67..01e3547f7e 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -444,7 +444,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
evacuated_server = self._evacuate_server(
servers[1], {'onSharedStorage': 'False'},
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -621,7 +622,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
compute3 = self.start_service('compute', host='host3')
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -800,7 +802,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@@ -870,6 +873,54 @@ class ServerGroupTestV264(ServerGroupTestV215):
self.assertEqual(2, hosts.count(host))
+class ServerGroupTestV295(ServerGroupTestV264):
+ microversion = '2.95'
+
+ def _evacuate_with_soft_anti_affinity_policies(self, group):
+ created_group = self.api.post_server_groups(group)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
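+ # under microversion 2.95 the evacuated server is expected to end up
+ # SHUTOFF (presumably the helper's default now), so no expected_state
+ # override is needed here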
+
+ # Note(gibi): need to get the server again as the state of the instance
+ # goes to ACTIVE first then the host of the instance changes to the
+ # new host later
+ evacuated_server = self.admin_api.get_server(evacuated_server['id'])
+
+ return [evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host']]
+
+ def test_evacuate_with_anti_affinity(self):
+ created_group = self.api.post_server_groups(self.anti_affinity)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ # Start additional host to test evacuation
+ compute3 = self.start_service('compute', host='host3')
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # check that the server is evacuated
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # check that policy is kept
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host'])
+
+ compute3.kill()
+
+
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index 4c9ab739d4..5887c99081 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -2260,7 +2260,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
}
server = self._evacuate_server(
- server, extra_post_args=post, expected_host=dest_hostname)
+ server, extra_post_args=post, expected_host=dest_hostname,
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2437,7 +2438,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
# stay ACTIVE and task_state will be set to None.
server = self._evacuate_server(
server, expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -5324,7 +5326,8 @@ class ServerMovingTestsWithNestedResourceRequests(
server = self._evacuate_server(
server, extra_post_args=post, expected_migration_status='error',
- expected_host=source_hostname)
+ expected_host=source_hostname,
+ expected_state='ACTIVE')
self.assertIn('Unable to move instance %s to host host2. The instance '
'has complex allocations on the source host so move '
@@ -5530,7 +5533,8 @@ class ServerMovingTestsFromFlatToNested(
self._evacuate_server(
server, extra_post_args=post, expected_host='host1',
- expected_migration_status='error')
+ expected_migration_status='error',
+ expected_state='ACTIVE')
# We expect that the evacuation will fail as force evacuate tries to
# blindly copy the source allocation to the destination but on the
diff --git a/nova/tests/functional/test_servers_resource_request.py b/nova/tests/functional/test_servers_resource_request.py
index e31ff42f14..9c91af7218 100644
--- a/nova/tests/functional/test_servers_resource_request.py
+++ b/nova/tests/functional/test_servers_resource_request.py
@@ -1068,7 +1068,7 @@ class PortResourceRequestBasedSchedulingTest(
def test_interface_attach_sriov_with_qos_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1115,7 +1115,7 @@ class PortResourceRequestBasedSchedulingTest(
):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1923,7 +1923,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_migrate_server_with_qos_port_pci_update_fail_not_reschedule(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1943,7 +1943,7 @@ class ServerMoveWithPortResourceRequestTest(
non_qos_port, qos_port, qos_sriov_port)
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name which will
+ # update_pci_request_with_placement_allocations which will
# intentionally not trigger a re-schedule even if there is host3 as an
# alternate.
self.api.post_server_action(server['id'], {'migrate': None})
@@ -2162,7 +2162,8 @@ class ServerMoveWithPortResourceRequestTest(
# simply fail and the server remains on the source host
server = self._evacuate_server(
server, expected_host='host1', expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state="ACTIVE")
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
@@ -2186,7 +2187,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_evacuate_with_qos_port_pci_update_fail(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is evacuated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2207,7 +2208,7 @@ class ServerMoveWithPortResourceRequestTest(
self.compute1_service_id, {'forced_down': 'true'})
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name
+ # update_pci_request_with_placement_allocations
server = self._evacuate_server(
server, expected_host='host1', expected_state='ERROR',
expected_task_state=None, expected_migration_status='failed')
@@ -2363,7 +2364,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_live_migrate_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is live migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2504,7 +2505,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_unshelve_offloaded_server_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is unshelved to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2537,7 +2538,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], {'unshelve': None})
# Unshelve fails on host2 due to
- # update_pci_request_spec_with_allocated_interface_name fails so the
+ # update_pci_request_with_placement_allocations fails so the
# instance goes back to shelve offloaded state
self.notifier.wait_for_versioned_notifications(
'instance.unshelve.start')
@@ -2979,6 +2980,7 @@ class ExtendedResourceRequestOldCompute(
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
+ self.api.microversion = '2.72'
@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+ # Simulate the get_local_node_uuid behavior of calling write once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Level-set test case to show that starting and re-starting without
+ # hitting any error cases works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
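The startup checks exercised by TestComputeStartupChecks boil down to a persistent node-identity pattern: generate a UUID once, write it next to the service, and refuse to start if that state later disappears or stops matching the database record. The sketch below is illustrative only; the file location, the check_startup_identity helper name and the exact error strings are assumptions drawn from the mocks and assertIn calls above, not Nova's actual nova.virt.node module.

import uuid


class InvalidConfiguration(Exception):
    """Raised when the persisted node identity is missing or inconsistent."""


def read_local_node_uuid(path='/var/lib/nova/compute_id'):
    # Return the previously persisted node UUID, or None on a first start.
    try:
        with open(path) as f:
            return f.read().strip() or None
    except FileNotFoundError:
        return None


def get_local_node_uuid(path='/var/lib/nova/compute_id'):
    # Generate and persist the UUID exactly once, then keep returning it.
    existing = read_local_node_uuid(path)
    if existing:
        return existing
    generated = str(uuid.uuid4())
    with open(path, 'w') as f:
        f.write(generated)
    return generated


def check_startup_identity(db_node_uuid, path='/var/lib/nova/compute_id'):
    # Mirrors the failure modes asserted in TestComputeStartupChecks
    # (hypothetical helper; error strings chosen to match the assertions).
    local = read_local_node_uuid(path)
    if db_node_uuid is not None and local is None:
        raise InvalidConfiguration(
            'This compute has run here before but lost that state')
    if db_node_uuid is not None and local != db_node_uuid:
        raise InvalidConfiguration(
            'Duplicate compute node record would be created for %s' % local)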
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index fb7f7662d8..bd88bb8d6e 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -416,3 +416,32 @@ class EvacuateTestV268(EvacuateTestV229):
def test_forced_evacuate_with_no_host_provided(self):
# not applicable for v2.68, which removed the 'force' parameter
pass
+
+
+class EvacuateTestV295(EvacuateTestV268):
+ def setUp(self):
+ super(EvacuateTestV268, self).setUp()
+ self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
+ version='2.95')
+ self.req = fakes.HTTPRequest.blank('', version='2.95')
+ self.mock_get_min_ver = self.useFixture(fixtures.MockPatch(
+ 'nova.objects.service.get_minimum_version_all_cells',
+ return_value=62)).mock
+
+ def test_evacuate_version_error(self):
+ self.mock_get_min_ver.return_value = 61
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._get_evacuate_response,
+ {'host': 'my-host', 'adminPass': 'foo'})
+
+ def test_evacuate_unsupported_rpc(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.UnsupportedRPCVersion(
+ api="fakeapi",
+ required="x.xx")
+
+ self.stub_out('nova.compute.api.API.evacuate', fake_evacuate)
+ self._check_evacuate_failure(webob.exc.HTTPConflict,
+ {'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 0581a47c84..ea9ca2f632 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index bd09307567..961f4a02c9 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -104,6 +104,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index 636682a6b7..9d99c3ae6d 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -87,7 +87,8 @@ class ServerGroupTestV21(test.NoDBTestCase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
- self.req = fakes.HTTPRequest.blank('')
+ self.member_req = fakes.HTTPRequest.member_req('')
+ self.reader_req = fakes.HTTPRequest.reader_req('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -114,20 +115,20 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
- req = fakes.HTTPRequest.blank('', version='2.63')
+ req = fakes.HTTPRequest.member_req('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
@@ -162,7 +163,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
- self.controller.create(self.req, body={'server_group': sgroup})
+ self.controller.create(self.member_req, body={'server_group': sgroup})
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
@@ -289,7 +290,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ reader_req = fakes.HTTPRequest.reader_req(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
@@ -298,7 +299,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertEqual(all, res_dict)
# test as non-admin
- res_dict = self.controller.index(req)
+ res_dict = self.controller.index(reader_req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@@ -347,25 +348,27 @@ class ServerGroupTestV21(test.NoDBTestCase):
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ req = fakes.HTTPRequest.reader_req(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, self.req, uuidsentinel.group)
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ self.controller.show, self.reader_req, uuidsentinel.group)
def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
+ ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID,
+ roles=['member', 'reader'])
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
@@ -379,7 +382,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
@@ -393,7 +396,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
- self.controller.show(self.req, ig_uuid)
+ self.controller.show(self.reader_req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
@@ -406,7 +409,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
@@ -414,99 +417,99 @@ class ServerGroupTestV21(test.NoDBTestCase):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=None)
+ self.controller.create, self.member_req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=body)
+ self.controller.create, self.member_req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
@@ -528,7 +531,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.index(self.admin_req)
# test as non-admin
- self.controller.index(self.req)
+ self.controller.index(self.reader_req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
@@ -598,7 +601,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
- resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
+ resp = self.controller.delete(self.member_req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
@@ -611,7 +614,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- self.req, 'invalid')
+ self.member_req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
@@ -622,7 +625,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
- self.controller.delete(self.req, ig_uuid)
+ self.controller.delete(self.member_req, ig_uuid)
class ServerGroupTestV213(ServerGroupTestV21):
@@ -649,7 +652,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
@@ -674,7 +677,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@@ -690,7 +693,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
@@ -698,7 +701,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertIn("Only anti-affinity policy supports rules", str(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# A negative test: the key is unknown, or the value is not a
# positive integer
invalid_rules = [{'unknown_key': '3'},
@@ -718,7 +721,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
@@ -734,7 +737,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
@@ -742,14 +745,14 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
@@ -771,7 +774,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_additional_params(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
@@ -786,7 +789,7 @@ class ServerGroupTestV275(ServerGroupTestV264):
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
- req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
- version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('/os-server-groups?dummy=False',
+ version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 8cf90ddebe..9ac970f787 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -240,6 +240,9 @@ class HTTPRequest(os_wsgi.Request):
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
+ roles = kwargs.pop('roles', [])
+ if use_admin_context:
+ roles.append('admin')
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
@@ -247,10 +250,19 @@ class HTTPRequest(os_wsgi.Request):
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
- is_admin=use_admin_context)
+ is_admin=use_admin_context,
+ roles=roles)
out.api_version_request = api_version.APIVersionRequest(version)
return out
+ @classmethod
+ def member_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['member', 'reader'], **kwargs)
+
+ @classmethod
+ def reader_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['reader'], **kwargs)
+
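The two helpers added above make the roles carried by the fake request explicit instead of implied by use_admin_context. For illustration (the microversion and path here are arbitrary examples), a policy-sensitive test can now pick the request matching the persona it exercises:

# Requests carrying explicit roles for default-policy checks.
admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)  # roles: ['admin']
member_req = fakes.HTTPRequest.member_req('', version='2.64')    # roles: ['member', 'reader']
reader_req = fakes.HTTPRequest.reader_req('/os-server-groups')   # roles: ['reader']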
class HTTPRequestV21(HTTPRequest):
pass
diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py
index 94e2fe5cb1..0eb7011c11 100644
--- a/nova/tests/unit/api/openstack/test_wsgi_app.py
+++ b/nova/tests/unit/api/openstack/test_wsgi_app.py
@@ -104,3 +104,18 @@ document_root = /tmp
'disable_compute_service_check_for_ffu', True,
group='workarounds')
wsgi_app._setup_service('myhost', 'api')
+
+ def test__get_config_files_empty_env(self):
+ env = {}
+ result = wsgi_app._get_config_files(env)
+ expected = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
+ self.assertEqual(result, expected)
+
+ def test__get_config_files_with_env(self):
+ env = {
+ "OS_NOVA_CONFIG_DIR": "/nova",
+ "OS_NOVA_CONFIG_FILES": "api.conf",
+ }
+ result = wsgi_app._get_config_files(env)
+ expected = ['/nova/api.conf']
+ self.assertEqual(result, expected)
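The two tests above pin down how the WSGI app resolves its configuration files: with nothing in the environment it falls back to /etc/nova/api-paste.ini and /etc/nova/nova.conf, while OS_NOVA_CONFIG_DIR and OS_NOVA_CONFIG_FILES override the directory and file list. A minimal helper with the same observable behavior might look like the sketch below; the ';' separator is an assumption, and the body is not a copy of Nova's real wsgi_app code.

import os


def _get_config_files(env=None):
    # Resolve the nova config files used by the WSGI application.
    env = os.environ if env is None else env
    dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova')
    names = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
    if names == ['']:
        # No explicit list given: fall back to the traditional pair.
        names = ['api-paste.ini', 'nova.conf']
    return [os.path.join(dirname, name) for name in names]


# Matches the expectations encoded in the tests above.
assert _get_config_files({}) == ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
assert _get_config_files({'OS_NOVA_CONFIG_DIR': '/nova',
                          'OS_NOVA_CONFIG_FILES': 'api.conf'}) == ['/nova/api.conf']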
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index df51665959..29dd5610f6 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -128,20 +128,21 @@ class TestPolicyCheck(test.NoDBTestCase):
self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
- self._check_filter_rules()
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context)
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
@@ -150,11 +151,13 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- self._check_filter_rules(target=instance)
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context, target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
- project_id='fake-project')
+ project_id='fake-project',
+ roles=['reader'])
instance = fake_instance.fake_instance_obj(db_context)
rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index ca72474a4c..9d6e9ba4bd 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -1254,10 +1254,12 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.notify_about_instance_usage')
@mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.compute.api.API._record_action_start')
@mock.patch('nova.compute.api.API._local_delete')
def test_delete_error_state_with_no_host(
- self, mock_local_delete, mock_service_get, _mock_notify,
- _mock_save, mock_bdm_get, mock_lookup, _mock_del_booting):
+ self, mock_local_delete, mock_record, mock_service_get,
+ _mock_notify, _mock_save, mock_bdm_get, mock_lookup,
+ _mock_del_booting):
# Instance in error state with no host should be a local delete
# for non API cells
inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
@@ -1269,6 +1271,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_terminate.assert_not_called()
mock_service_get.assert_not_called()
@@ -4077,7 +4081,8 @@ class _ComputeAPIUnitTestMixIn(object):
injected_files=[], bdms=bdms,
preserve_ephemeral=False, host=None,
request_spec=fake_spec,
- reimage_boot_volume=True)
+ reimage_boot_volume=True,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
_checks_for_create_and_rebuild.assert_called_once_with(
@@ -4092,7 +4097,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance,
uuids.image_ref,
admin_pass,
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
@mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@@ -4151,7 +4157,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance,
uuids.image_ref,
admin_pass,
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@@ -4201,7 +4208,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4274,7 +4282,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4342,7 +4351,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4401,7 +4411,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4465,7 +4476,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -5790,7 +5802,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5842,6 +5857,7 @@ class _ComputeAPIUnitTestMixIn(object):
# Assert that the instance task state was set in the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5850,7 +5866,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5858,6 +5875,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5895,6 +5918,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+ # Assert that the instance task state was set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+ # Assert that any attempt to rescue a bfv instance with a rescue
+ # image that does not set the hw_rescue_device and hw_rescue_bus
+ # properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
@@ -7905,8 +8046,9 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.Instance, 'destroy')
+ @mock.patch('nova.compute.api.API._record_action_start')
def _test_delete_volume_backed_instance(
- self, vm_state, mock_instance_destroy, bdm_destroy,
+ self, vm_state, mock_record, mock_instance_destroy, bdm_destroy,
notify_about_instance_usage, mock_save, mock_elevated,
bdm_get_by_instance_uuid, mock_lookup, _mock_del_booting,
notify_about_instance_action):
@@ -7935,6 +8077,8 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
'detach') as mock_detach:
self.compute_api.delete(self.context, inst)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_deallocate.assert_called_once_with(self.context, inst)
mock_detach.assert_called_once_with(self.context, volume_id,
inst.uuid)
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 9ef3999441..dcdef56fbe 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -169,7 +169,8 @@ class ClaimTestCase(test.NoDBTestCase):
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
@@ -181,7 +182,8 @@ class ClaimTestCase(test.NoDBTestCase):
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 314c29f583..49cf15ec17 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -2731,7 +2731,7 @@ class ComputeTestCase(BaseTestCase,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2762,7 +2762,7 @@ class ComputeTestCase(BaseTestCase,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2815,7 +2815,7 @@ class ComputeTestCase(BaseTestCase,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
on_shared_storage=False, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2834,7 +2834,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[], reimage_boot_volume=False)
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2855,7 +2856,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[], reimage_boot_volume=False)
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2889,7 +2890,7 @@ class ComputeTestCase(BaseTestCase,
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -4617,7 +4618,8 @@ class ComputeTestCase(BaseTestCase,
'request_spec': None,
'on_shared_storage': False,
'accel_uuids': (),
- 'reimage_boot_volume': False}),
+ 'reimage_boot_volume': False,
+ 'target_state': None}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5136,7 +5138,7 @@ class ComputeTestCase(BaseTestCase,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
inst_ref.refresh()
@@ -10843,7 +10845,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
mock_update_pci
@@ -10913,7 +10915,7 @@ class ComputeAPITestCase(BaseTestCase):
new=mock.NonCallableMock()),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10958,7 +10960,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -11025,7 +11027,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
'remove_resources_from_instance_allocation'),
@@ -11961,7 +11963,7 @@ class ComputeAPITestCase(BaseTestCase):
force=False)
@mock.patch('nova.compute.utils.notify_about_instance_action')
- def _test_evacuate(self, mock_notify, force=None):
+ def _test_evacuate(self, mock_notify, force=None, target_state=None):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
@@ -11998,7 +12000,8 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host',
on_shared_storage=True,
admin_password=None,
- force=force)
+ force=force,
+ target_state=target_state)
if force is False:
host = None
else:
@@ -12015,7 +12018,8 @@ class ComputeAPITestCase(BaseTestCase):
recreate=True,
on_shared_storage=True,
request_spec=fake_spec,
- host=host)
+ host=host,
+ target_state=target_state)
do_test()
instance.refresh()
@@ -12047,6 +12051,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_evacuate_with_forced_host(self):
self._test_evacuate(force=True)
+ def test_evacuate_with_target_state(self):
+ self._test_evacuate(target_state="stopped")
+
@mock.patch('nova.servicegroup.api.API.service_is_up',
return_value=False)
def test_fail_evacuate_with_non_existing_destination(self, _service_is_up):
@@ -13520,7 +13527,7 @@ class EvacuateHostTestCase(BaseTestCase):
return_value=mock.sentinel.mapping)
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
@mock.patch.object(network_api, 'setup_networks_on_host')
@@ -13540,7 +13547,8 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[], reimage_boot_volume=False)
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 1a4935f482..1c69cd8f1c 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -57,6 +57,7 @@ from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
@@ -76,6 +77,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.virt import node as virt_node
from nova.volume import cinder
@@ -89,6 +91,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
# os-brick>=5.1 now uses external file system locks instead of internal
# locks so we need to set up locking
REQUIRES_LOCKING = True
+ STUB_COMPUTE_ID = False
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
@@ -906,6 +909,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
+ @mock.patch.object(manager.ComputeManager,
+ '_ensure_existing_node_identity')
@mock.patch.object(manager.ComputeManager, '_get_nodes')
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@@ -924,17 +929,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm, mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_init_host,
- mock_error_interrupted, mock_get_nodes):
+ mock_error_interrupted, mock_get_nodes,
+ mock_existing_node):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
+ mock_existing_node.assert_not_called()
mock_validate_pinning.assert_called_once_with(inst_list)
mock_validate_vtpm.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
@@ -977,8 +984,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"""
mock_get_nodes.return_value = {
uuids.cn_uuid1: objects.ComputeNode(
- uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
- self.compute.init_host()
+ uuid=uuids.cn_uuid1, hypervisor_hostname='node1',
+ host=self.compute.host)}
+ self.compute.init_host(None)
mock_error_interrupted.assert_called_once_with(
test.MatchType(nova.context.RequestContext), set(),
@@ -988,16 +996,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
- def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
+ def test_cleanup_host(self, mock_cnlist_get, mock_miglist_get,
+ mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
+ mock_cnlist_get.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
+ self.compute.init_host(None)
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
@@ -1086,7 +1097,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'remove_provider_tree_from_instance_allocation')
) as (mock_get_net, mock_remove_allocation):
- self.compute.init_host()
+ self.compute.init_host(None)
mock_remove_allocation.assert_called_once_with(
self.context, deleted_instance.uuid, uuids.our_node_uuid)
@@ -1139,11 +1150,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuids.evac_instance: evacuating_instance
}
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
mock_init_instance.assert_called_once_with(
self.context, active_instance)
@@ -1151,23 +1162,49 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {active_instance.uuid, evacuating_instance.uuid},
mock_get_nodes.return_value.keys())
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
- def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn1 = objects.ComputeNode(uuid=uuids.cn1)
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [cn1, cn2]
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
+ # NOTE(danms): The fake driver, by default, uses
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
+ # return here.
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
+ mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn1: cn1, uuids.cn2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_by_uuid.assert_called_once_with(self.context,
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # A possible hostname rename (as reported by the virt driver),
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
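test_get_nodes and test_get_nodes_mismatch together encode the contract: the node UUIDs reported by the virt driver are authoritative, and if the hypervisor_hostname stored for one of those UUIDs no longer matches what the driver reports, startup aborts instead of silently re-homing the node. A hedged sketch of that comparison follows, using only the interfaces mocked in these tests; lookup_nodes_by_uuid is a hypothetical helper name, not the real _get_nodes body.

def lookup_nodes_by_uuid(context, driver, compute_node_list_cls, exc_cls):
    # driver.get_nodenames_by_uuid() -> {node_uuid: hypervisor_hostname}
    reported = driver.get_nodenames_by_uuid()
    records = compute_node_list_cls.get_all_by_uuids(context, list(reported))
    for cn in records:
        if cn.hypervisor_hostname != reported.get(cn.uuid):
            # Most likely a hypervisor hostname rename; refuse to start.
            raise exc_cls(
                'Node %s is reported as %s by the driver but is recorded '
                'as %s in the database' % (
                    cn.uuid, reported.get(cn.uuid), cn.hypervisor_hostname))
    return {cn.uuid: cn for cn in records}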
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
@@ -1189,37 +1226,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
- self, mock_driver_get_nodes, mock_get_by_host_and_node,
+ self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [
- exception.ComputeHostNotFound(host='fake-node1'), cn2]
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
+ mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn2: cn2}, nodes)
+ self.assertEqual({}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_all_by_uuids.assert_called_once_with(self.context,
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
- "Compute node %s not found in the database. If this is the first "
- "time this service is starting on this host, then you can ignore "
- "this warning.", 'fake-node1')
+ "Compute nodes %s for host %s were not found in the database. "
+ "If this is the first time this service is starting on this host, "
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(self,
@@ -1230,13 +1265,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_pinning.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(self,
@@ -1247,7 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -5145,7 +5182,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
group='pci'
)
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
@@ -5335,7 +5372,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [], False)
+ recreate, False, False, None, scheduled_node, {}, None, [], False,
+ None)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5447,7 +5485,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
limits={}, request_spec=request_spec, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5487,7 +5526,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
request_spec=request_spec, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5513,7 +5552,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [], False)
+ False, False, migration, None, {}, None, [], False,
+ None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5535,7 +5575,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [], False)
+ None, [], False, None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5618,7 +5658,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
preserve_ephemeral, {}, {},
self.allocations,
mock.sentinel.mapping, [],
- False)
+ False, None)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5877,7 +5917,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[], reimage_boot_volume=False)
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -6321,6 +6361,171 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual({'one-image': 'cached',
'two-image': 'existing'}, r)
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
+ # Make sure an up-to-date service bypasses the persistence
+ service_ref = service_obj.Service()
+ self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
+ # Make sure an old service for ironic does not write a local node uuid
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_preprovisioned(self,
+ mock_read_node,
+ mock_write_node):
+ # Make sure an old service does not write a uuid if one is present
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = str(uuids.SOME_UUID)
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_no_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, have no pre-provisioned node uuid,
+ # and find no nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = []
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_multi_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, have no pre-provisioned node uuid,
+ # and find multiple nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [1, 2]
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_writes_node_uuid(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, have no pre-provisioned local
+ # compute node uuid, and find exactly one compute node in the
+ # database for our host, we persist that node's uuid.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [
+ objects.ComputeNode(uuid=str(uuids.compute)),
+ ]
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_called_once_with(str(uuids.compute))
+
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
+ def test_ensure_node_uuid_called_by_init_host(self):
+ # test_init_host() above ensures that we do not call
+ # _ensure_existing_node_identity() in the service_ref=None case.
+ # Since testing init_host() requires a billion mocks, this test
+ # verifies that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_ensure_existing_node_identity') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host,
+ mock.sentinel.service_ref)
+ m.assert_called_once_with(mock.sentinel.service_ref)
+
+ def test_check_for_host_rename_ironic(self):
+ self.flags(compute_driver='ironic')
+ # Using the ironic driver makes sure we take the early exit, even
+ # though these nodes point at another host
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.compute._check_for_host_rename(nodes)
+
+ def test_check_for_host_rename_renamed_only(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_renamed_one(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host),
+ uuids.node2: mock.MagicMock(uuid=uuids.node2,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_not_renamed(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host)}
+ with mock.patch.object(manager.LOG, 'debug') as mock_debug:
+ self.compute._check_for_host_rename(nodes)
+ mock_debug.assert_called_once_with(
+ 'Verified node %s matches my host %s',
+ uuids.node1, self.compute.host)
+
+ @mock.patch('nova.compute.manager.ComputeManager._get_nodes')
+ def test_check_for_host_rename_called_by_init_host(self, mock_nodes):
+ # Since testing init_host() requires a billion mocks, this test
+ # verifies that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_check_for_host_rename') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host, None)
+ m.assert_called_once_with(mock_nodes.return_value)
+
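
Editor's note: the four _check_for_host_rename tests added above pin down a small contract: the check is skipped entirely when the ironic driver is in use, any compute node record owned by a different host aborts startup with InvalidConfiguration, and a matching node only produces a debug log. A minimal stand-alone sketch of that contract (hypothetical helper and exception names, not the actual nova.compute.manager code):

    import logging

    LOG = logging.getLogger(__name__)


    class InvalidConfiguration(Exception):
        """Placeholder for nova.exception.InvalidConfiguration."""


    def check_for_host_rename(is_ironic, my_host, nodes):
        # 'nodes' maps compute node UUID -> node record carrying .uuid and
        # .host, the same shape the _get_nodes() tests above work with.
        if is_ironic:
            # Ironic nodes are not tied to this host's name; nothing to check.
            return
        for node in nodes.values():
            if node.host != my_host:
                raise InvalidConfiguration(
                    'Compute node %s is recorded against host %s, not %s; '
                    'resolve the hostname change before starting.'
                    % (node.uuid, node.host, my_host))
            LOG.debug('Verified node %s matches my host %s',
                      node.uuid, my_host)
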
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -7927,6 +8132,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
@@ -8487,11 +8728,17 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
# resource request and therefore no matching request group exists in
# the request spec.
self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(),
objects.InstancePCIRequest(
+ request_id=uuids.req0,
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
requester_id=uuids.port1,
spec=[{'vendor_id': '1377', 'product_id': '0047'}]),
- objects.InstancePCIRequest(requester_id=uuids.port2),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ requester_id=uuids.port2,
+ ),
])
with test.nested(
mock.patch.object(self.compute.driver, 'spawn'),
@@ -8536,8 +8783,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = None
@@ -8559,8 +8811,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = {
@@ -8584,8 +8841,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1, uuids.rp2])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
self.assertRaises(
exception.BuildAbortException,
@@ -9386,9 +9648,15 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual(driver_console.get_connection_info.return_value,
console)
+ @mock.patch('nova.utils.pass_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
- def _test_max_concurrent_live(self, mock_lm):
+ def _test_max_concurrent_live(self, mock_lm, mock_pass_context):
+ # pass_context wraps the function, which doesn't work with a mock,
+ # so we simply mock it too.
+ def _mock_pass_context(runner, func, *args, **kwargs):
+ return runner(func, *args, **kwargs)
+ mock_pass_context.side_effect = _mock_pass_context
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
@@ -11061,7 +11329,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -11095,7 +11363,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
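
Editor's note: earlier in this file's diff, the test_get_nodes* changes describe the new node lookup flow: the virt driver reports a mapping of node UUID to hypervisor hostname, the database is queried by those UUIDs, missing records only produce the warning asserted above, and a hostname mismatch aborts startup. A rough sketch of that flow under assumed names (not the real ComputeManager._get_nodes):

    class InvalidConfiguration(Exception):
        """Placeholder for nova.exception.InvalidConfiguration."""


    def get_nodes(context, driver, compute_node_list, my_host, log):
        # {node_uuid: hypervisor_hostname} as reported by the virt driver
        uuid_to_name = driver.get_nodenames_by_uuid()
        db_nodes = compute_node_list.get_all_by_uuids(
            context, list(uuid_to_name))
        if not db_nodes:
            log.warning(
                'Compute nodes %s for host %s were not found in the '
                'database. If this is the first time this service is '
                'starting on this host, then you can ignore this warning.',
                list(uuid_to_name), my_host)
            return {}
        for node in db_nodes:
            if node.hypervisor_hostname != uuid_to_name[node.uuid]:
                # Possible hypervisor hostname rename: refuse to start.
                raise InvalidConfiguration(
                    'Node %s is named %s in the database but the driver '
                    'reports %s.' % (node.uuid, node.hypervisor_hostname,
                                     uuid_to_name[node.uuid]))
        return {node.uuid: node for node in db_nodes}
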
diff --git a/nova/tests/unit/compute/test_pci_placement_translator.py b/nova/tests/unit/compute/test_pci_placement_translator.py
index ee6a0469ac..0592186e54 100644
--- a/nova/tests/unit/compute/test_pci_placement_translator.py
+++ b/nova/tests/unit/compute/test_pci_placement_translator.py
@@ -12,12 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import ddt
+from oslo_utils.fixture import uuidsentinel as uuids
from unittest import mock
from nova.compute import pci_placement_translator as ppt
+from nova.compute import provider_tree
from nova import exception
from nova.objects import fields
from nova.objects import pci_device
+from nova.pci import devspec
from nova import test
@@ -88,8 +91,8 @@ class TestTranslator(test.NoDBTestCase):
)
def test_trait_normalization(self, trait_names, expected_traits):
self.assertEqual(
- expected_traits | {"COMPUTE_MANAGED_PCI_DEVICE"},
- ppt._get_traits_for_dev({"traits": trait_names})
+ expected_traits,
+ ppt.get_traits(trait_names)
)
@ddt.unpack
@@ -110,7 +113,9 @@ class TestTranslator(test.NoDBTestCase):
def test_resource_class_normalization(self, pci_dev, rc_name, expected_rc):
self.assertEqual(
expected_rc,
- ppt._get_rc_for_dev(pci_dev, {"resource_class": rc_name})
+ ppt.get_resource_class(
+ rc_name, pci_dev.vendor_id, pci_dev.product_id
+ ),
)
def test_dependent_device_pf_then_vf(self):
@@ -118,12 +123,16 @@ class TestTranslator(test.NoDBTestCase):
"fake-node", instances_under_same_host_resize=[])
pf = pci_device.PciDevice(
address="0000:81:00.0",
- dev_type=fields.PciDeviceType.SRIOV_PF
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
)
vf = pci_device.PciDevice(
address="0000:81:00.1",
parent_addr=pf.address,
- dev_type=fields.PciDeviceType.SRIOV_VF
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
)
pv._add_dev(pf, {"resource_class": "foo"})
@@ -146,17 +155,23 @@ class TestTranslator(test.NoDBTestCase):
"fake-node", instances_under_same_host_resize=[])
pf = pci_device.PciDevice(
address="0000:81:00.0",
- dev_type=fields.PciDeviceType.SRIOV_PF
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
)
vf = pci_device.PciDevice(
address="0000:81:00.1",
parent_addr=pf.address,
- dev_type=fields.PciDeviceType.SRIOV_VF
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
)
vf2 = pci_device.PciDevice(
address="0000:81:00.2",
parent_addr=pf.address,
- dev_type=fields.PciDeviceType.SRIOV_VF
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
)
pv._add_dev(vf, {"resource_class": "foo"})
@@ -182,7 +197,10 @@ class TestTranslator(test.NoDBTestCase):
pci_device.PciDevice(
address="0000:81:00.%d" % f,
parent_addr="0000:71:00.0",
- dev_type=fields.PciDeviceType.SRIOV_VF)
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
for f in range(0, 4)
]
@@ -220,3 +238,54 @@ class TestTranslator(test.NoDBTestCase):
"CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO for 0000:81:00.0,0000:81:00.1.",
str(ex),
)
+
+ def test_translator_maps_pci_device_to_rp(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+
+ pv._add_dev(vf, {})
+ pv._add_dev(pf, {})
+ pv.update_provider_tree(pt)
+
+ self.assertEqual(
+ pt.data("fake-node_0000:71:00.0").uuid, vf.extra_info["rp_uuid"]
+ )
+ self.assertEqual(
+ pt.data("fake-node_0000:72:00.0").uuid, pf.extra_info["rp_uuid"]
+ )
+
+ def test_update_provider_tree_for_pci_update_pools(self):
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ )
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = [pf]
+ pci_tracker.dev_filter.specs = [devspec.PciDeviceSpec({})]
+
+ ppt.update_provider_tree_for_pci(pt, 'fake-node', pci_tracker, {}, [])
+
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices.\
+ assert_called_once_with()
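
Editor's note: the two new PlacementView tests above assert how PCI devices land in the provider tree: a PF gets a provider named "<nodename>_<its own address>", a VF is modelled on its parent PF's address, and the chosen provider UUID is written back to the device's extra_info['rp_uuid']. A hedged sketch of that mapping against a provider-tree-like object (assumed helper names, not the translator's real code):

    def rp_name_for_device(nodename, dev):
        # VFs share their parent PF's resource provider; PFs get their own.
        addr = dev.parent_addr if dev.dev_type == 'type-VF' else dev.address
        return '%s_%s' % (nodename, addr)


    def map_devices_to_providers(provider_tree, nodename, devices):
        for dev in devices:
            name = rp_name_for_device(nodename, dev)
            if not provider_tree.exists(name):
                # Create the PCI provider as a child of the compute node RP.
                provider_tree.new_child(name, nodename)
            # Record where the device was modelled, which is what the tests
            # check via extra_info['rp_uuid'].
            dev.extra_info['rp_uuid'] = provider_tree.data(name).uuid
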
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index b81d7365d2..cd36b8987f 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -64,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
+ 'uuid': uuids.cn1,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
+ deleted=False,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -586,7 +588,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
@@ -619,7 +621,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
@@ -643,8 +645,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'flavor',
'migration_context',
'resources'])
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
- _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
@@ -671,7 +672,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -730,7 +731,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
@@ -747,7 +748,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
@@ -772,7 +773,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
@@ -797,7 +798,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
@@ -823,7 +824,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -863,7 +864,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
@@ -889,7 +890,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -926,7 +927,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -952,7 +953,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -987,7 +988,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1013,7 +1014,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1056,7 +1057,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
@@ -1084,7 +1085,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1121,7 +1122,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1147,7 +1148,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1199,7 +1200,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1240,7 +1241,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1273,7 +1274,7 @@ class TestInitComputeNode(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
@@ -1296,14 +1297,14 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
- def fake_get_node(_ctx, host, node):
+ def fake_get_node(_ctx, uuid):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
@@ -1313,85 +1314,67 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.cn1)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
- pci_mock, get_by_hypervisor_mock):
+ pci_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
- def fake_get_all(_ctx, nodename):
- return [cn]
+ def fake_get_node(_ctx, uuid):
+ return cn
- get_mock.side_effect = exc.NotFound
- get_by_hypervisor_mock.side_effect = fake_get_all
+ get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
- self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock)
+ create_mock):
+ self._test_compute_node_created(update_mock, get_mock, create_mock)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@@ -1452,13 +1435,9 @@ class TestInitComputeNode(BaseTestCase):
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- if rebalances_nodes:
- get_by_hypervisor_mock.assert_called_once_with(
- mock.sentinel.ctx, _NODENAME)
- else:
- get_by_hypervisor_mock.assert_not_called()
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.compute_node_uuid)
+
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
@@ -1466,7 +1445,7 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
@@ -1489,14 +1468,14 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
- ctxt, _HOSTNAME, _NODENAME)] * 2)
+ ctxt, uuids.cn_uuid)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
@@ -1512,6 +1491,81 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'fake-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Host is the same, no _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_not_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node_move_host(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'old-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Our host changed, so we should have the updated value and have
+ # called _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
@@ -1580,6 +1634,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
+ @mock.patch('nova.objects.ComputeNode.save', new=mock.Mock())
@mock.patch(
'nova.pci.stats.PciDeviceStats.has_remote_managed_device_pools',
return_value=True)
@@ -1773,7 +1828,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
- self.assertEqual(1, mock_resource_change.call_count)
+ self.assertEqual(0, mock_resource_change.call_count)
@mock.patch(
'nova.compute.resource_tracker.ResourceTracker.'
@@ -2041,6 +2096,10 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_update_to_placement',
+ new=mock.Mock())
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
@@ -2401,7 +2460,10 @@ class TestInstanceClaim(BaseTestCase):
vendor_id='0001',
product_id='0002',
numa_node=0,
- tags={'dev_type': 'type-PCI'},
+ tags={
+ 'dev_type': 'type-PCI',
+ 'address': '0000:81:00.0'
+ },
count=0
)
]
@@ -2422,7 +2484,8 @@ class TestInstanceClaim(BaseTestCase):
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
- pci_stats_mock.assert_called_once_with([request])
+ pci_stats_mock.assert_called_once_with(
+ [request], provider_mapping=None)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -2626,7 +2689,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2730,7 +2793,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
return_value=_COMPUTE_NODE_FIXTURES[0])
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
return_value=[])
@@ -2902,7 +2965,7 @@ class TestResize(BaseTestCase):
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -3072,7 +3135,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -3204,7 +3267,7 @@ class TestRebuild(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
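
Editor's note: the TestInitComputeNode additions above spell out two new resource tracker behaviors: a soft-deleted compute node record found by UUID is revived in place (and only triggers _update() when its host changed), and a DuplicateRecord raised on create becomes InvalidConfiguration so the service refuses to start rather than fight over the record. A condensed sketch of that branch, with placeholder names rather than the real _init_compute_node:

    class DuplicateRecord(Exception):
        """Placeholder for nova.exception.DuplicateRecord."""


    class InvalidConfiguration(Exception):
        """Placeholder for nova.exception.InvalidConfiguration."""


    def init_compute_node(rt, context, resources, build_new_node):
        node = rt._get_compute_node(context, resources['uuid'])
        if node is not None:
            if node.deleted:
                # Revive the soft-deleted record instead of duplicating it.
                node.deleted = False
                node.deleted_at = None
                node.save()
            if node.host != rt.host:
                node.host = rt.host
                rt._update(context, node)
            return False  # an existing node was found, nothing was created
        node = build_new_node(resources)
        try:
            node.create()
        except DuplicateRecord:
            raise InvalidConfiguration(
                'A compute node record for this hostname/UUID already '
                'exists; refusing to create a duplicate.')
        return True
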
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 55d0fc53e8..6f78678a92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -836,7 +836,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
limits=None, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False, version='6.1')
+ reimage_boot_volume=False, target_state=None,
+ version='6.2')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -868,9 +869,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
node=None, host=None, reimage_boot_volume=False,
- **rebuild_args)
+ target_state=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2'),
+ mock.call('6.1'),
+ mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
@@ -890,7 +893,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
compute_api.router.client.return_value = mock_client
# Force can_send_version to [False, True, True], so that 6.0
# version is used.
- mock_client.can_send_version.side_effect = [False, True, True]
+ mock_client.can_send_version.side_effect = [False, False, True, True]
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
rebuild_args = {
@@ -908,12 +911,47 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'limits': None,
'accel_uuids': [],
'reimage_boot_volume': True,
+ 'target_state': None,
}
self.assertRaises(
exception.NovaException, compute_api.rebuild_instance,
ctxt, instance=self.fake_instance_obj,
node=None, host=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.1')])
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2')])
+
+ def test_rebuild_instance_evacuate_old_rpcapi(self):
+ # With rpcapi < 6.2, if evacuate we should raise error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to return False.
+ mock_client.can_send_version.return_value = False
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': 'stopped',
+ }
+ self.assertRaises(
+ exception.UnsupportedRPCVersion,
+ compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
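
Editor's note: the rebuild_instance RPC tests above encode the client-side version negotiation: the newest pin (6.2, which added target_state) is probed first, and when it is unavailable the new argument either forces an UnsupportedRPCVersion error (if an evacuation actually requested a target_state) or is dropped before falling back to 6.1/6.0/5.x. A simplified illustration of that ladder, not the real ComputeAPI code:

    class UnsupportedRPCVersion(Exception):
        """Placeholder for nova.exception.UnsupportedRPCVersion."""


    def pick_rebuild_version(client, target_state, reimage_boot_volume):
        # Probe newest-first, matching the can_send_version calls asserted
        # in the tests above.
        if client.can_send_version('6.2'):
            return '6.2'
        if target_state is not None:
            # target_state (evacuate to a requested power state) needs 6.2.
            raise UnsupportedRPCVersion(
                'compute RPC 6.2 required for target_state')
        if client.can_send_version('6.1'):
            return '6.1'
        if reimage_boot_volume:
            # Re-imaging the boot volume on rebuild needs at least 6.1.
            raise UnsupportedRPCVersion(
                'compute RPC 6.1 required for reimage_boot_volume')
        if client.can_send_version('6.0'):
            return '6.0'
        # Oldest supported fallback; accel_uuids handling differs below 5.12.
        return '5.12' if client.can_send_version('5.12') else '5.0'
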
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index 705ca2f34f..0a1e3f54fc 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -285,7 +285,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
return instance
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock())
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@@ -635,7 +635,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request(
self, mock_update_pci, mock_setup_network):
requested_res = [objects.RequestGroup(
@@ -659,7 +659,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host',
new=mock.NonCallableMock())
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request_update_raises(
self, mock_update_pci):
requested_res = [objects.RequestGroup(
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index 848050d769..dd10ecd7df 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -1558,47 +1558,86 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
def test_no_pci_request(self):
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, [], provider_mapping)
- def test_pci_request_from_flavor(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=None)]
+ def test_pci_request_from_flavor_no_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
+ self.context, mock.sentinel.report_client, pci_requests,
+ provider_mapping)
+
+ self.assertNotIn('rp_uuids', req.spec[0])
+
+ def test_pci_request_from_flavor_with_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
+ provider_mapping = {
+ f"{uuids.req1}-0": [uuids.rp1],
+ f"{uuids.req1}-1": [uuids.rp2],
+ }
+
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
+ self.assertEqual(
+ {uuids.rp1, uuids.rp2}, set(req.spec[0]["rp_uuids"].split(','))
+ )
+
def test_pci_request_has_no_mapping(self):
pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_pci_request_ambiguous_mapping(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1, uuids.rp2]}
self.assertRaises(
exception.AmbiguousResourceProviderForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_unexpected_provider_name(self):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = 'unexpected'
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}])]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
+
provider_mapping = {uuids.port_1: [uuids.rp1]}
self.assertRaises(
exception.UnexpectedResourceProviderNameForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, report_client, pci_requests,
provider_mapping)
@@ -1610,11 +1649,14 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = (
'host:agent:enp0s31f6')
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}],)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1]}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, report_client, pci_requests, provider_mapping)
report_client.get_resource_provider_name.assert_called_once_with(
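
Editor's note: the reworked PciRequestUpdateTestCase above shows what the renamed helper, update_pci_request_with_placement_allocations, is expected to do for flavor-born PCI requests: placement allocations are reported per numbered group ("<request_id>-0", "<request_id>-1", ...), and every matching provider UUID is recorded comma-joined in the request's spec[0]['rp_uuids'], while a request with no mapping is left untouched. A small sketch of just that flavor path, as an assumed simplification of the real nova.compute.utils helper (which also handles Neutron-port requests and their error cases):

    def apply_flavor_request_mapping(pci_request, provider_mapping):
        # Flavor-based requests are split into numbered placement request
        # groups; gather the providers each group was allocated from.
        prefix = '%s-' % pci_request.request_id
        rp_uuids = [
            rp
            for group, rps in provider_mapping.items()
            if group.startswith(prefix)
            for rp in rps
        ]
        if rp_uuids:
            # Store the providers on the spec; downstream PCI handling reads
            # 'rp_uuids' from here.
            pci_request.spec[0]['rp_uuids'] = ','.join(rp_uuids)
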
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index de15be28bd..4e888139f6 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -761,7 +761,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_call_livem_checks_on_host')
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index e942217a6c..971570dfb5 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -389,7 +389,8 @@ class _BaseTaskTestCase(object):
'preserve_ephemeral': False,
'host': 'compute-host',
'request_spec': None,
- 'reimage_boot_volume': False}
+ 'reimage_boot_volume': False,
+ 'target_state': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -4751,9 +4752,34 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_evacuate_old_rpc_with_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': 'stopped'})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version', return_value=False):
+ self.assertRaises(exc.UnsupportedRPCVersion,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj, **rebuild_args)
+
+ def test_evacuate_old_rpc_without_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': None})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ can_send_version.assert_has_calls([
+ mock.call('1.25'), mock.call('1.24'),
+ mock.call('1.12')])
+
def test_rebuild_instance_volume_backed(self):
inst_obj = self._create_fake_instance_obj()
- version = '1.24'
+ version = '1.25'
cctxt_mock = mock.MagicMock()
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': inst_obj.host})
@@ -4785,7 +4811,8 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
self.conductor.rebuild_instance,
self.context, inst_obj,
**rebuild_args)
- can_send_version.assert_called_once_with('1.24')
+ can_send_version.assert_has_calls([mock.call('1.25'),
+ mock.call('1.24')])
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
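The two new conductor tests pin the rebuild/evacuate RPC compatibility rules: target_state needs rebuild_instance version 1.25, and when the peer cannot accept it the call must fail rather than silently drop a non-None target_state; with target_state unset it falls back through 1.24 and 1.12. A hedged sketch of that negotiation pattern (class and function names here are illustrative, not the conductor rpcapi):

    class FakeVersionedClient:
        """Stand-in for an RPC client that only knows some versions (sketch)."""
        def __init__(self, supported):
            self.supported = set(supported)

        def can_send_version(self, version):
            return version in self.supported

    def pick_rebuild_version(client, target_state):
        """Choose the highest usable version, refusing to lose target_state."""
        if client.can_send_version('1.25'):
            return '1.25'
        if target_state is not None:
            # mirrors exc.UnsupportedRPCVersion in the test above
            raise RuntimeError('peer too old for target_state')
        for version in ('1.24', '1.12'):
            if client.can_send_version(version):
                return version
        return '1.12'

    old_peer = FakeVersionedClient({'1.12'})
    assert pick_rebuild_version(old_peer, target_state=None) == '1.12'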
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index fc25bef2bc..639623bbb5 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -635,7 +635,9 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
# now the same url but with extra leading '/' characters removed.
if expected_cpython in errmsg:
location = result[3].decode()
- location = location.removeprefix('Location: ').rstrip('\r\n')
+ if location.startswith('Location: '):
+ location = location[len('Location: '):]
+ location = location.rstrip('\r\n')
self.assertTrue(
location.startswith('/example.com/%2F..'),
msg='Redirect location is not the expected sanitized URL',
diff --git a/nova/tests/unit/db/main/test_migrations.py b/nova/tests/unit/db/main/test_migrations.py
index d2c4ef9762..e52deb262a 100644
--- a/nova/tests/unit/db/main/test_migrations.py
+++ b/nova/tests/unit/db/main/test_migrations.py
@@ -314,6 +314,15 @@ class NovaMigrationsWalk(
self.assertIsInstance(
table.c.encryption_options.type, sa.types.String)
+ def _check_960aac0e09ea(self, connection):
+ self.assertIndexNotExists(
+ connection, 'console_auth_tokens',
+ 'console_auth_tokens_token_hash_idx',
+ )
+ self.assertIndexNotExists(
+ connection, 'instances', 'uuid',
+ )
+
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 7e6894a1cc..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -16,6 +16,7 @@ import copy
from unittest import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
@@ -553,17 +562,15 @@ class _TestComputeNodeObject(object):
def test_update_from_virt_driver_uuid_already_set(self):
"""Tests update_from_virt_driver where the compute node object already
- has a uuid value so the uuid from the virt driver is ignored.
+ has a uuid value so an error is raised.
"""
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
# Emulate the ironic driver which adds a uuid field.
resources['uuid'] = uuidsentinel.node_uuid
compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
- compute.update_from_virt_driver(resources)
- expected = fake_compute_with_resources.obj_clone()
- expected.uuid = uuidsentinel.something_else
- self.assertTrue(base.obj_equal_prims(expected, compute))
+ self.assertRaises(exception.InvalidNodeConfiguration,
+ compute.update_from_virt_driver, resources)
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index d91015a699..58b9859234 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -430,6 +430,62 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
+ def test_from_components_flavor_based_pci_requests(self):
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+ ctxt = context.RequestContext(
+ fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
+ )
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {
+ "id": uuids.image_id,
+ "properties": {"mappings": []},
+ "status": "fake-status",
+ "location": "far-away",
+ }
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {"fake": "property"}
+
+ qos_port_rg = request_spec.RequestGroup()
+ req_lvl_params = request_spec.RequestLevelParams()
+
+ pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "1234", "product_id": "fe12"}],
+ )
+ ]
+ )
+ pci_request_group = request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_1234_FE12": 1},
+ same_provider=True,
+ )
+
+ spec = objects.RequestSpec.from_components(
+ ctxt,
+ instance.uuid,
+ image,
+ flavor,
+ instance.numa_topology,
+ pci_requests,
+ filter_properties,
+ None,
+ instance.availability_zone,
+ port_resource_requests=[qos_port_rg],
+ request_level_params=req_lvl_params,
+ )
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(qos_port_rg, spec.requested_resources[0])
+ self.assertEqual(
+ pci_request_group.obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+ self.assertEqual(req_lvl_params, spec.request_level_params)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
@@ -1054,6 +1110,183 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
pass
+class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_pci_reqs_ignored_if_disabled(self):
+ self.flags(group='filter_scheduler', pci_in_placement=False)
+
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_neutron_based_requests_are_ignored(self):
+ pci_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[],
+ )
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(requests=[pci_req]),
+ )
+ self.assertEqual(
+ objects.InstancePCIRequest.NEUTRON_PORT, pci_req.source
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_rc_from_product_and_vendor(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[{"vendor_id": "fff", "product_id": "dead"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_multi_device_split_to_multiple_groups(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=2,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-1",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_with_rc_and_traits_from_the_pci_req_spec(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "de12",
+ "product_id": "1234",
+ "resource_class": "gpu",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[
+ {
+ "vendor_id": "fff",
+ "product_id": "dead",
+ "traits": "foo,bar,CUSTOM_BLUE",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_GPU": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ # Note that sets would be serialized to tuples by obj_to_primitive in
+ # random order, so we need to match this spec field by field
+ expected = request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ required_traits={"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"},
+ use_same_provider=True,
+ )
+ actual = spec.requested_resources[1]
+ for field in request_spec.RequestGroup.fields.keys():
+ self.assertEqual(getattr(expected, field), getattr(actual, field))
+
+
class TestRequestGroupObject(test.NoDBTestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
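The new RequestSpec tests fix the translation from flavor-based PCI requests to placement request groups: each requested device gets its own group named '<request_id>-<index>', the resource class defaults to CUSTOM_PCI_<VENDOR>_<PRODUCT> unless the device spec names a resource_class, and traits are normalized to CUSTOM_* form. A rough, standalone sketch of that naming rule (the real logic lives in generate_request_groups_from_pci_requests and handles more cases, such as standard os-traits):

    def pci_spec_to_rc_and_traits(spec):
        """Resource class and traits for one PCI request spec entry (sketch)."""
        if "resource_class" in spec:
            rc = "CUSTOM_" + spec["resource_class"].upper()
        else:
            rc = "CUSTOM_PCI_%s_%s" % (
                spec["vendor_id"].upper(), spec["product_id"].upper())
        traits = set()
        for trait in spec.get("traits", "").split(","):
            trait = trait.strip()
            if trait:
                traits.add(
                    trait if trait.startswith("CUSTOM_")
                    else "CUSTOM_" + trait.upper())
        return rc, traits

    # matches the expectations asserted above
    assert pci_spec_to_rc_and_traits(
        {"vendor_id": "de12", "product_id": "1234"}
    ) == ("CUSTOM_PCI_DE12_1234", set())
    assert pci_spec_to_rc_and_traits(
        {"vendor_id": "fff", "product_id": "dead", "traits": "foo,bar,CUSTOM_BLUE"}
    ) == ("CUSTOM_PCI_FFF_DEAD", {"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"})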
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 7aefbd15fd..4a3f17f6cb 100644
--- a/nova/tests/unit/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
@@ -187,6 +187,21 @@ class PciRequestTestCase(test.NoDBTestCase):
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
+ def test_get_alias_from_config_valid_rc_and_traits(self):
+ fake_alias = jsonutils.dumps({
+ "name": "xxx",
+ "resource_class": "foo",
+ "traits": "bar,baz",
+ })
+ self.flags(alias=[fake_alias], group='pci')
+ aliases = request._get_alias_from_config()
+ self.assertIsNotNone(aliases)
+ self.assertIn("xxx", aliases)
+ self.assertEqual(
+ ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]),
+ aliases["xxx"],
+ )
+
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index ef8eb2b2b8..7eb43a05f4 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -12,11 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
from unittest import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
@@ -107,17 +108,19 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self._create_fake_devs()
def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_remove_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[1]['count'], 1)
+ self.assertEqual(self.pci_stats.pools[1]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
@@ -146,36 +149,36 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
m = self.pci_stats.to_device_pools_obj()
new_stats = stats.PciDeviceStats(objects.NUMATopology(), m)
- self.assertEqual(len(new_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
+ self.assertEqual(len(new_stats.pools), 4)
+ self.assertEqual([d['count'] for d in new_stats], [1, 1, 1, 1])
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_apply_requests(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.pci_stats.apply_requests(pci_requests, {})
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
- pci_requests_multiple)
+ pci_requests_multiple,
+ {},
+ )
def test_support_requests(self):
- self.assertTrue(self.pci_stats.support_requests(pci_requests))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
+ self.assertTrue(self.pci_stats.support_requests(pci_requests, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_failed(self):
self.assertFalse(
- self.pci_stats.support_requests(pci_requests_multiple))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.pci_stats.support_requests(pci_requests_multiple, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_numa(self):
cells = [
@@ -184,14 +187,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_failed(self):
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info(self):
cells = [
@@ -199,12 +206,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'])
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
# 'legacy' is the default numa_policy so the result must be same
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy = fields.PCINUMAAffinityPolicy.LEGACY)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_pci_numa_policy_preferred(self):
# numa node 0 has 2 devices with vendor_id 'v1'
@@ -218,7 +229,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(
numa_policy=fields.PCINUMAAffinityPolicy.PREFERRED)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info_pci_numa_policy_required(self):
# pci device with vendor_id 'v3' has numa_node=None.
@@ -230,7 +243,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy=fields.PCINUMAAffinityPolicy.REQUIRED)
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_filter_pools_for_socket_affinity_no_socket(self):
self.pci_stats.numa_topology = objects.NUMATopology(
@@ -571,7 +586,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'compute_node_id': 1,
'address': '0000:0e:00.1',
'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
'status': 'available',
'request_id': None,
'dev_type': fields.PciDeviceType.SRIOV_VF,
@@ -599,35 +614,68 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.assertEqual(v, pool[k])
def _assertPools(self):
+ nr_tagged = len(self.pci_tagged_devices)
+ nr_untagged = len(self.pci_untagged_devices)
+ nr_remote = len(self.remote_managed_netdevs)
+ nr_local = len(self.locally_managed_netdevs)
+ self.assertEqual(
+ nr_tagged + nr_untagged + nr_remote + nr_local,
+ len(self.pci_stats.pools),
+ )
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # 5 pools with the second one having the tag 'physical_network'
- # and the value 'physnet1' and multiple pools for testing
- # variations of explicit/implicit remote_managed tagging.
- self.assertEqual(5, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e',
- len(self.remote_managed_netdevs),
- remote_managed='true')
- self.assertEqual(self.remote_managed_netdevs,
- self.pci_stats.pools[2]['devices'])
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[0]],
- self.pci_stats.pools[3]['devices'])
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[1]],
- self.pci_stats.pools[4]['devices'])
+ # they are also part of the keys.
+
+ # 3 pools for the pci_untagged_devices
+ devs = []
+ j = 0
+ for i in range(j, j + nr_untagged):
+ self._assertPoolContent(self.pci_stats.pools[i], '1137', '0072', 1)
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_untagged_devices, devs)
+ j += nr_untagged
+
+ # 4 pools for the pci_tagged_devices

+ devs = []
+ for i in range(j, j + nr_tagged):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "1137",
+ "0071",
+ 1,
+ physical_network="physnet1",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_tagged_devices, devs)
+ j += nr_tagged
+
+ # one with remote_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_remote):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101e",
+ 1,
+ remote_managed="true",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.remote_managed_netdevs, devs)
+ j += nr_remote
+
+ # two with locally_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_local):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101c",
+ 1,
+ remote_managed="false",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.locally_managed_netdevs, devs)
+ j += nr_local
def test_add_devices(self):
self._create_pci_devices()
@@ -650,20 +698,30 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
PCI_REMOTE_MANAGED_TAG: 'False'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
PCI_REMOTE_MANAGED_TAG: 'False'}])]
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(5, len(devs))
- self.assertEqual(set(['0071', '0072', '1018', '101e', '101c']),
+ self.assertEqual(set(['0071', '0072', '101e', '101c']),
set([dev.product_id for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 0)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0072', 1)
+ self._assertPoolContent(self.pci_stats.pools[2], '1137', '0072', 1)
+
+ self._assertPoolContent(self.pci_stats.pools[3], '1137', '0071', 0,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[4], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[6], '1137', '0071', 1,
physical_network='physnet1')
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e', 0,
+
+ self._assertPoolContent(self.pci_stats.pools[7], '15b3', '101e', 0,
remote_managed='true')
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 0,
+ self._assertPoolContent(self.pci_stats.pools[8], '15b3', '101c', 0,
remote_managed='false')
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 0,
+ self._assertPoolContent(self.pci_stats.pools[9], '15b3', '101c', 0,
remote_managed='false')
def test_add_device_no_devspec(self):
@@ -706,30 +764,754 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device(dev2)
self._assertPools()
- def test_update_device(self):
- # Update device type of one of the device from type-PCI to
+ def test_update_device_splits_the_pool(self):
+ # Update device type of one of the devices from type-VF to
# type-PF. Verify if the existing pool is updated and a new
# pool is created with dev_type type-PF.
- self._create_pci_devices()
- dev1 = self.pci_tagged_devices.pop()
- dev1.dev_type = 'type-PF'
- self.pci_stats.update_device(dev1)
- self.assertEqual(6, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071',
- 1,
- physical_network='physnet1',
- remote_managed='false')
- self.assertEqual(dev1,
- self.pci_stats.pools[5]['devices'][0])
+ vfs = []
+ for i in range(3):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="1137",
+ product_id="0071",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(3, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+
+ dev = vfs.pop()
+ dev.dev_type = 'type-PF'
+ dev.parent_addr = None
+ self.pci_stats.update_device(dev)
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(1, self.pci_stats.pools[1]["count"])
+ self.assertEqual([dev], self.pci_stats.pools[1]["devices"])
+
+ def test_only_vfs_from_the_same_parent_are_pooled(self):
+ pf1_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ pf1_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ pf2_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0b:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0b:01.0",
+ numa_node=0
+ )
+ pf2_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(pf1_vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[1]["count"])
+ self.assertEqual(pf2_vfs, self.pci_stats.pools[1]["devices"])
+
+
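The reworked pooling tests above show that pools are keyed more finely than before: VFs only share a pool when they hang off the same parent PF, which is why the earlier 3-pool and 5-pool expectations grow and the tagged/untagged device groups split into per-parent pools. A hypothetical illustration of that grouping key (plain dicts, not the real PciDeviceStats keying):

    import collections

    def pool_key(dev):
        # vendor/product always discriminate; VFs are additionally split by the
        # PF they belong to, so two VFs pool together only with the same parent
        key = (dev['vendor_id'], dev['product_id'])
        if dev['dev_type'] == 'type-VF':
            key += (dev['parent_addr'],)
        return key

    devs = [
        {'vendor_id': '15b3', 'product_id': '1018', 'dev_type': 'type-VF',
         'parent_addr': '0000:0a:01.0'},
        {'vendor_id': '15b3', 'product_id': '1018', 'dev_type': 'type-VF',
         'parent_addr': '0000:0b:01.0'},
    ]
    pools = collections.defaultdict(list)
    for dev in devs:
        pools[pool_key(dev)].append(dev)
    assert len(pools) == 2  # same vendor/product, different parents -> two pools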
+class PciDeviceStatsPlacementSupportTestCase(test.NoDBTestCase):
+
+ def test_device_spec_rc_and_traits_ignored_during_pooling(self):
+ """Assert that resource_class and traits from the device spec are not
+ used as a discriminator for pool creation.
+ """
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "resource_class": "foo",
+ "address": "*:81:00.1",
+ "traits": "gold",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "resource_class": "baar",
+ "address": "*:81:00.2",
+ "traits": "silver",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ # the two devices are matched by different device_specs with different
+ # resource_class and traits fields
+ pci_stats.add_device(pci_dev1)
+ pci_stats.add_device(pci_dev2)
+
+ # but they are put in the same pool as all the other fields are
+ # matching
+ self.assertEqual(1, len(pci_stats.pools))
+ self.assertEqual(2, pci_stats.pools[0]["count"])
+
+ def test_filter_pools_for_spec_ignores_rc_and_traits_in_spec(self):
+ """Assert that resource_class and traits are ignored in the pci
+ request spec during matching the request to pools.
+ """
+ pci_stats = stats.PciDeviceStats(objects.NUMATopology())
+ pools = [{"vendor_id": "dead", "product_id": "beef"}]
+
+ matching_pools = pci_stats._filter_pools_for_spec(
+ pools=pools,
+ request=objects.InstancePCIRequest(
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "resource_class": "foo",
+ "traits": "blue",
+ }
+ ]
+ ),
+ )
+
+ self.assertEqual(pools, matching_pools)
+
+ def test_populate_pools_metadata_from_assigned_devices(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp1}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertEqual(uuids.rp1, pci_stats.pools[0]['rp_uuid'])
+
+ def test_populate_pools_metadata_from_assigned_devices_device_without_rp(
+ self
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertNotIn('rp_uuid', pci_stats.pools[0])
+
+ def test_populate_pools_metadata_from_assigned_devices_multiple_rp(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp2}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ self.assertRaises(
+ ValueError,
+ pci_stats.populate_pools_metadata_from_assigned_devices,
+ )
+
+
+class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ # for simplicity accept any devices
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "*:*:*.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ self.dev_filter = whitelist.Whitelist(device_spec)
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ # add devices represented by different RPs in placement
+ # two VFs on the same PF
+ self.vf1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.vf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(self.vf1)
+ self.vf1.extra_info = {'rp_uuid': uuids.pf1}
+ self.pci_stats.add_device(self.vf2)
+ self.vf2.extra_info = {'rp_uuid': uuids.pf1}
+ # two PFs pf2 and pf3 (pf1 is used for the parent of the above VFs)
+ self.pf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:82:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf2)
+ self.pf2.extra_info = {'rp_uuid': uuids.pf2}
+
+ self.pf3 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:83:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf3)
+ self.pf3.extra_info = {'rp_uuid': uuids.pf3}
+ # a PCI
+ self.pci1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:84:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PCI",
+ )
+ self.pci_stats.add_device(self.pci1)
+ self.pci1.extra_info = {'rp_uuid': uuids.pci1}
+
+ # populate the RP -> pool mapping from the devices to its pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 1 pool for the two VFs, then each of the remaining devices
+ # has its own pool
+ self.num_pools = 4
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 5
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_unrestricted(self):
+ reqs = []
+ for dev_type in ["type-VF", "type-PF", "type-PCI"]:
+ req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": dev_type,
+ }
+ ],
+ )
+ reqs.append(req)
+
+ # an empty mapping means unrestricted by any provider
+ # we have devs for all types so each request should fit
+ self.assertTrue(self.pci_stats.support_requests(reqs, {}))
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the same request to consume the pools
+ self.pci_stats.apply_requests(reqs, {})
+ # we have consumed 3 devs (a VF, a PF, and a PCI)
+ self.assertEqual(
+ self.num_devs - 3,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # the empty pools are purged. We have one pool for the remaining VF
+ # and the remaining PF
+ self.assertEqual(2, len(self.pci_stats.pools))
+
+ def test_support_request_restricted_by_provider_mapping(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # simulate the placement restricted the possible RPs to pf3
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+ )
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the request and see if the right device is consumed
+ self.pci_stats.apply_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools any more
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_request_restricted_by_provider_mapping_does_not_fit(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect the request to fail
+ self.assertFalse(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and the pools are not changed
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_neutron_port_based_request_ignore_mapping(self):
+ # by not having the alias_name set this becomes a neutron port based
+ # PCI request
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect that the placement selection is ignored for neutron port
+ # based requests so this request should fit as we have PFs in the pools
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.pci_stats.apply_requests(
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and a PF is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+
+ def test_support_request_req_with_count_2(self):
+ # now ask for two PFs in a single request
+ pf_req = objects.InstancePCIRequest(
+ count=2,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both PF reqs
+ mapping = {
+ f"{uuids.req1}-0": [uuids.pf2],
+ f"{uuids.req1}-1": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(self.pci_stats.support_requests([pf_req], mapping))
+ self.pci_stats.apply_requests([pf_req], mapping)
+ # and both PFs are consumed
+ self.assertEqual(self.num_pools - 2, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_requests_multiple_reqs(self):
+ # request both a VF and a PF
+ vf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.pf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both reqs
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.pf_req}-0": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req, pf_req], mapping)
+ )
+ self.pci_stats.apply_requests([vf_req, pf_req], mapping)
+ # and the proper devices are consumed
+ # Note that the VF pool still has a device so it remains
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_apply_gets_requested_uuids_from_pci_req(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate and
+ # stored the resulting mapping in the request. The allocation
+ # restricts us to consuming only from PF3.
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # call apply with None mapping signalling that the allocation is
+ # already done and the resulted mapping is stored in the request
+ self.pci_stats.apply_requests([pf_req], provider_mapping=None)
+
+ # assert that the right device is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def _create_two_pools_with_two_vfs(self):
+ # create two pools (PFs) with two VFs each
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ for pf_index in [1, 2]:
+ for vf_index in [1, 2]:
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address=f"0000:81:0{pf_index}.{vf_index}",
+ parent_addr=f"0000:81:0{pf_index}.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(dev)
+ dev.extra_info = {'rp_uuid': getattr(uuids, f"pf{pf_index}")}
+
+ # populate the RP -> pool mapping from the devices to its pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 2 pools and 4 devs in total
+ self.num_pools = 2
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 4
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_apply_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate where 1 VF
+ # is consumed from PF1 and two from PF2
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.vf_req}-1": [uuids.pf2],
+ f"{uuids.vf_req}-2": [uuids.pf2],
+ }
+ # This should fit
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req], mapping)
+ )
+ # and when consumed the consumption from the pools should be in sync
+ # with the placement allocation. So the PF2 pool is expected to
+ # disappear as it is fully consumed and the PF1 pool should have
+ # one free device.
+ self.pci_stats.apply_requests([vf_req], mapping)
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(uuids.pf1, self.pci_stats.pools[0]['rp_uuid'])
+ self.assertEqual(1, self.pci_stats.pools[0]['count'])
+
+ def test_consume_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # In placement 1 VF is allocated from PF1 and two from PF2
+ "rp_uuids": ",".join([uuids.pf1, uuids.pf2, uuids.pf2])
+ }
+ ],
+ )
+
+ # So when the PCI claim consumes devices based on this request we
+ # expect that nova follows what is allocated in placement.
+ devs = self.pci_stats.consume_requests([vf_req])
+ self.assertEqual(
+ {"0000:81:01.0": 1, "0000:81:02.0": 2},
+ collections.Counter(dev.parent_addr for dev in devs),
+ )
+
+ def test_consume_restricted_by_allocation(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate and
+ # stored the resulting mapping in the request. The allocation
+ # restricts us to consuming only from PF3.
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # Call consume. It always expects the allocated mapping to be stored
+ # in the PCI request as it is always called from the compute side.
+ consumed_devs = self.pci_stats.consume_requests([pf_req])
+ # assert that the right device is consumed
+ self.assertEqual([self.pf3], consumed_devs)
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {
+ pool["rp_uuid"]
+ for pool in self.pci_stats.pools
+ if pool["count"] > 0
+ },
+ )
class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index 68a051b26c..7490441d92 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -58,6 +58,16 @@ class BasePolicyTest(test.TestCase):
def setUp(self):
super(BasePolicyTest, self).setUp()
+ # TODO(gmann): enforce_scope and enforce_new_defaults are enabled
+ # by default in the code so disable them in base test class until
+ # we have deprecated rules and their tests. We have enforce_scope
+ # and no-legacy tests which are explicitly enabling scope and new
+ # defaults to test the new defaults and scope. In future, once
+ # we remove the deprecated rules, along with refactoring the unit
+ # tests we can remove overriding the oslo policy flags.
+ self.flags(enforce_scope=False, group="oslo_policy")
+ if not self.without_deprecated_rules:
+ self.flags(enforce_new_defaults=False, group="oslo_policy")
self.useFixture(fixtures.NeutronFixture(self))
self.policy = self.useFixture(fixtures.RealPolicyFixture())
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index ddc8241003..b9e4c29dba 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -103,7 +103,7 @@ class EvacuatePolicyTest(base.BasePolicyTest):
evacuate_mock.assert_called_once_with(
self.user_req.environ['nova.context'],
mock.ANY, 'my-host', False,
- 'MyNewPass', None)
+ 'MyNewPass', None, None)
class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
index 0ebe95d5e4..ba9073e0df 100644
--- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -11,6 +11,7 @@
# under the License.
import itertools
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -53,7 +54,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
@@ -132,7 +135,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
- 'ram_allocation_ratio': 1.3})
+ 'ram_allocation_ratio': 1.3,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
@@ -180,7 +185,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': numa_topology,
'pci_stats': None,
'cpu_allocation_ratio': 1,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
assertion = self.assertTrue if passes else self.assertFalse
# test combinations of image properties and extra specs
@@ -237,7 +244,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
@@ -287,7 +296,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': host_topology,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
def test_numa_topology_filter_pass_networks(self):
host = self._get_fake_host_state_with_networks()
@@ -329,3 +340,79 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
network_metadata=network_metadata)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filters_candidates(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 3 candidates for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+ # and that from those candidates only the second matches the numa logic
+ mock_numa_fit.side_effect = [False, True, False]
+
+ # run the filter and expect that the host passes as it has at least
+ # one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ # also assert that the filter checked all three candidates
+ self.assertEqual(3, len(mock_numa_fit.mock_calls))
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filter_fails_if_no_matching_candidate_left(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 1 candidate for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+ # simulate that the only candidate we have does not match
+ mock_numa_fit.side_effect = [False]
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(1, len(mock_numa_fit.mock_calls))
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
index edd9735b34..27d80b884e 100644
--- a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -12,6 +12,8 @@
from unittest import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import objects
from nova.pci import stats
from nova.scheduler.filters import pci_passthrough_filter
@@ -33,11 +35,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_fail(self):
pci_stats_mock = mock.MagicMock()
@@ -47,11 +54,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_no_pci_request(self):
spec_obj = objects.RequestSpec(pci_requests=None)
@@ -82,3 +94,92 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
host = fakes.FakeHostState('host1', 'node1',
attribute_dict={'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ def test_filters_candidates(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that only the second allocation candidate fits
+ pci_stats_mock.support_requests.side_effect = [False, True, False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned 3 possible candidates
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it passes the host as there is at
+ # least one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked all three candidates
+ pci_stats_mock.support_requests.assert_has_calls(
+ [
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_2"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_3"]},
+ ),
+ ]
+ )
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ def test_filter_fails_if_no_matching_candidate_left(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that the only candidate we have does not match
+ pci_stats_mock.support_requests.side_effect = [False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned a single candidate
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked our candidate
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ )
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
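Both the NUMA topology and PCI passthrough filter tests above encode the same per-candidate behaviour: a host passes if at least one placement allocation candidate satisfies the check, and host_state.allocation_candidates is pruned to the survivors so later filters and the final claim only see viable mappings. A generic, hedged sketch of that prune-and-pass pattern (the 'fits' callable stands in for numa_fit_instance_to_host or support_requests):

    def host_passes(host_candidates, fits):
        surviving = [cand for cand in host_candidates
                     if fits(cand['mappings'])]
        # the host state keeps only the candidates that passed this filter
        host_candidates[:] = surviving
        return bool(surviving)

    cands = [{'mappings': {'g-0': ['rp1']}},
             {'mappings': {'g-0': ['rp2']}},
             {'mappings': {'g-0': ['rp3']}}]
    # only the second candidate fits, so the host passes and one candidate remains
    assert host_passes(cands, lambda m: m['g-0'] == ['rp2'])
    assert cands == [{'mappings': {'g-0': ['rp2']}}]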
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index c4445d5578..1a7daa515f 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -1562,10 +1562,14 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
- numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
- fake_numa_topology,
- limits=None, pci_requests=None,
- pci_stats=None)
+ numa_fit_mock.assert_called_once_with(
+ fake_host_numa_topology,
+ fake_numa_topology,
+ limits=None,
+ pci_requests=None,
+ pci_stats=None,
+ provider_mapping=None,
+ )
numa_usage_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 9356292918..e7866069b3 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -26,6 +26,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova.scheduler import utils as scheduler_utils
@@ -396,9 +397,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, aggregates=[])
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -459,20 +467,29 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=group)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
+ instance_group=group,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ limits={},
+ cell_uuid=uuids.cell,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.manager._schedule(ctx, spec_obj,
- instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
- mock.sentinel.provider_summaries)
+ instance_uuids, None, mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -510,14 +527,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance]
@@ -583,11 +610,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
project_id=uuids.project_id,
instance_group=None)
- host_state = mock.Mock(spec=host_manager.HostState,
- host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host=mock.sentinel.host,
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+            allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = False
instance_uuids = [uuids.instance]
@@ -604,7 +636,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
- mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
+ mock_get_hosts.assert_called_once_with(spec_obj, mock.ANY, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.manager.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
@@ -635,18 +667,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, updated='fake')
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ updated="fake",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [
- all_host_states, # first instance: return all the hosts (only one)
- [], # second: act as if no more hosts that meet criteria
- all_host_states, # the final call when creating alternates
- ]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ # first instance: return all the hosts (only one)
+ if c == 0:
+ return hosts
+            # second: act as if no more hosts meet the criteria
+ elif c == 1:
+ return []
+ # the final call when creating alternates
+ elif c == 2:
+ return hosts
+ else:
+ raise StopIteration()
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
@@ -679,20 +734,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -744,20 +823,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -814,17 +917,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=ig, instance_uuid=uuids.instance0)
+ instance_group=ig,
+ instance_uuid=uuids.instance0,
+ requested_resources=[],
+ )
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
- nodename="node1", limits={}, uuid=uuids.cn1,
- cell_uuid=uuids.cell1, instances={}, aggregates=[])
- hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
- nodename="node2", limits={}, uuid=uuids.cn2,
- cell_uuid=uuids.cell2, instances={}, aggregates=[])
+ hs1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host1",
+ nodename="node1",
+ limits={},
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ hs2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host2",
+ nodename="node2",
+ limits={},
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell2,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
@@ -838,13 +960,18 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# _get_sorted_hosts() in the two iterations for each instance in
# num_instances
visited_instances = set([])
+ get_sorted_hosts_called_with_host_states = []
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
- return [hs1, hs2]
- return [hs2, hs1]
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return s
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return reversed(s)
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
@@ -871,10 +998,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
- mock.call(spec_obj, all_host_states, 0),
- mock.call(spec_obj, [hs2, hs1], 1),
+ mock.call(spec_obj, mock.ANY, 0),
+ mock.call(spec_obj, mock.ANY, 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
+ self.assertEqual(
+ all_host_states, get_sorted_hosts_called_with_host_states[0])
+ self.assertEqual(
+ [hs1], get_sorted_hosts_called_with_host_states[1])
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
@@ -1168,14 +1299,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, updated="Not None")
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ updated="Not None",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [all_host_states, []]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return []
+ else:
+ raise StopIteration
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
@@ -1204,7 +1357,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1212,14 +1365,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1270,11 +1423,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
+
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
- mock_sorted.side_effect = [all_host_states,
- list(reversed(all_host_states)),
- all_host_states]
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return list(reversed(all_host_states))
+ elif c == 2:
+ return list(hosts)
+ else:
+ raise StopIteration()
+
+ mock_sorted.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1282,14 +1448,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1323,7 +1489,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
@@ -1331,14 +1497,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1521,3 +1687,506 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager._discover_hosts_in_cells(mock.sentinel.context)
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+
+
+class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
+
+ class ACRecorderFilter(filters.BaseHostFilter):
+ """A filter that records what allocation candidates it saw on each host
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.seen_candidates = []
+
+ def host_passes(self, host_state, filter_properties):
+ # record what candidate the filter saw for each host
+ self.seen_candidates.append(list(host_state.allocation_candidates))
+ return True
+
+ class DropFirstFilter(filters.BaseHostFilter):
+ """A filter that removes one candidate and keeps the rest on each
+ host
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates.pop(0)
+ return bool(host_state.allocation_candidates)
+
+ @mock.patch.object(
+ host_manager.HostManager, '_init_instance_info', new=mock.Mock())
+ @mock.patch.object(
+ host_manager.HostManager, '_init_aggregates', new=mock.Mock())
+ def setUp(self):
+ super().setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.manager = manager.SchedulerManager()
+ self.manager.host_manager.weighers = []
+ self.request_spec = objects.RequestSpec(
+ ignore_hosts=[],
+ force_hosts=[],
+ force_nodes=[],
+ requested_resources=[],
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_see_allocation_candidates_for_each_host(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+        # have a single filter configured so we can assert that the filter
+        # sees the allocation_candidates of each host
+ filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [filter]
+
+ instance_uuids = [uuids.inst1]
+
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts with different candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ mock.sentinel.host1_a_c_1,
+ mock.sentinel.host1_a_c_2,
+ ]
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ mock.sentinel.host2_a_c_1,
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ mock.sentinel.allocation_request_version,
+ )
+
+        # we expect that our filter saw the allocation candidate list of
+        # each host respectively
+ self.assertEqual(
+ [
+ alloc_reqs_by_rp_uuid[uuids.host1],
+ alloc_reqs_by_rp_uuid[uuids.host2],
+ ],
+ filter.seen_candidates,
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_scheduler_selects_filtered_a_c_from_hosts_state(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+        """Assert that if a filter removes an allocation candidate from a host
+        then, even if that host is selected, the removed allocation candidate
+        is not used by the scheduler.
+ """
+
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+        # we have requested one instance to be scheduled so expect one set
+        # of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(
+ "host1-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consecutive_filter_sees_filtered_a_c_list(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # create two filters
+ # 1) DropFirstFilter runs first and drops the first candidate from each
+ # host
+ # 2) ACRecorderFilter runs next and records what candidates it saw
+ recorder_filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [
+ self.DropFirstFilter(),
+ recorder_filter,
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+        # we expect that the second filter saw one host with one candidate,
+        # as candidate1 was already filtered out by the first filter
+ self.assertEqual(
+ [["host1-candidate2"]],
+ recorder_filter.seen_candidates
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_removes_all_a_c_host_is_not_selected(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ ]
+ # second with two candidates
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ "host2-candidate1",
+ "host2-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we expect that the first host is not selected as the filter
+ # removed every candidate from the host
+ # also we expect that on the second host only candidate2 could have
+ # been selected
+ # we asked for one instance, so we expect one set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(uuids.host2, selection.compute_node_uuid)
+ self.assertEqual(
+ "host2-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consume_selected_host_sees_updated_request_spec(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # simulate that nothing is filtered out, by not having any filters
+ self.manager.host_manager.enabled_filters = []
+
+ # set up the request spec with a request group to be updated
+ # by the selected candidate
+ self.request_spec.requested_resources = [
+ objects.RequestGroup(
+ requester_id=uuids.group_req1, provider_uuids=[]
+ )
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+        # have a single host with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ # simulate that placement fulfilled the above RequestGroup from
+ # a certain child RP of the host.
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child_rp],
+ }
+ }
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ # make asserts on the request_spec passed to consume
+ def assert_request_spec_updated_with_selected_candidate(
+ selected_host, spec_obj, instance_uuid=None
+ ):
+ # we expect that the scheduler updated the request_spec based
+            # on the selected candidate before calling consume
+ self.assertEqual(
+ [uuids.host1_child_rp],
+ spec_obj.requested_resources[0].provider_uuids,
+ )
+
+ mock_consume.side_effect = (
+ assert_request_spec_updated_with_selected_candidate)
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ mock_consume.assert_called_once()
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_main_selection_with_claimed_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+        """Assert that the first (a.k.a. main) selection returned for an
+        instance always maps to the allocation candidate that was claimed by
+        the scheduler in placement.
+ """
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+        # have one host with 3 candidates, each fulfilling the request group
+        # from a different child RP
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+                    # This is odd, but the unnamed request group uses ""
+                    # as its name.
+ "": [uuids.host1],
+ uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
+ }
+ } for i in [1, 2, 3]
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ self.assertEqual(uuids.host1, selection.compute_node_uuid)
+ # we expect that host1_child2 candidate is selected as the
+ # DropFirstFilter will drop host1_child1
+ expected_a_c = {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child2],
+ }
+ }
+ self.assertEqual(
+ expected_a_c,
+ jsonutils.loads(selection.allocation_request),
+ )
+ # and we expect that the same candidate was claimed in placement
+ mock_claim.assert_called_once_with(
+ mock.ANY,
+ self.manager.placement_client,
+ self.request_spec,
+ uuids.inst1,
+ expected_a_c,
+ allocation_request_version="fake-alloc-req-version",
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_alts_with_filtered_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that alternate generation also works based on filtered
+ candidates.
+ """
+
+ class RPFilter(filters.BaseHostFilter):
+ """A filter that only allows candidates with specific RPs"""
+
+ def __init__(self, allowed_rp_uuids):
+ self.allowed_rp_uuids = allowed_rp_uuids
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates = [
+ a_c
+ for a_c in host_state.allocation_candidates
+ if a_c["mappings"][uuids.group_req1][0]
+ in self.allowed_rp_uuids
+ ]
+ return True
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have 3 hosts each with 2 allocation candidates fulfilling a request
+ # group from a different child RP
+ hosts = []
+ for i in [1, 2, 3]:
+ host = host_manager.HostState(f"host{i}", f"node{i}", uuids.cell1)
+ host.uuid = getattr(uuids, f"host{i}")
+ alloc_reqs_by_rp_uuid[host.uuid] = [
+ {
+ "mappings": {
+ "": [host.uuid],
+ uuids.group_req1: [
+ getattr(uuids, f"host{i}_child{j}")
+ ],
+ }
+ }
+ for j in [1, 2]
+ ]
+ hosts.append(host)
+ mock_get_all_host_states.return_value = iter(hosts)
+
+ # configure a filter that only "likes" host1_child2 and host3_child2
+ # RPs. This means host2 is totally out and host1 and host3 only have
+ # one viable candidate
+ self.manager.host_manager.enabled_filters = [
+ RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
+ ]
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+        # we expect a main selection and a single alternative (host1 and
+        # host3); on both selections we expect child2 as the selected
+        # candidate
+ self.assertEqual(2, len(selections))
+ main_selection = selections[0]
+ self.assertEqual(uuids.host1, main_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host1_child2],
+ jsonutils.loads(main_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
+
+ alt_selection = selections[1]
+ self.assertEqual(uuids.host3, alt_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host3_child2],
+ jsonutils.loads(alt_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
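
SchedulerManagerAllocationCandidateTestCase above relies on host filters being able to read and prune host_state.allocation_candidates in place, with the scheduler then claiming one of the surviving candidates. A hedged sketch of such a filter, modelled on the in-test DropFirstFilter (illustrative only, not a filter shipped by nova):

    from nova.scheduler import filters


    class KeepEvenCandidatesFilter(filters.BaseHostFilter):
        """Hypothetical filter that keeps every second candidate per host."""

        def host_passes(self, host_state, filter_properties):
            # prune the candidate list in place ...
            host_state.allocation_candidates = (
                host_state.allocation_candidates[::2])
            # ... and only pass the host if a viable candidate is left
            return bool(host_state.allocation_candidates)
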
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
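
test_filesystem.py above fixes the contract of the new nova.filesystem helpers: paths are joined under the SYS prefix, reads and writes go through plain open(), and OSError is translated into exception.FileNotFound. A rough sketch consistent with those assertions (the real module may add logging or other details):

    import os

    from nova import exception

    SYS = '/sys'


    def read_sys(path):
        # read a sysfs file relative to SYS, mapping OSError to FileNotFound
        try:
            with open(os.path.join(SYS, path), mode='r') as fd:
                return fd.read()
        except OSError:
            raise exception.FileNotFound(file_path=path)


    def write_sys(path, data):
        # write a sysfs file relative to SYS, mapping OSError to FileNotFound
        try:
            with open(os.path.join(SYS, path), mode='w') as fd:
                fd.write(data)
        except OSError:
            raise exception.FileNotFound(file_path=path)
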
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index 10b2a79db4..41cbada99f 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -1043,3 +1043,24 @@ class HackingTestCase(test.NoDBTestCase):
import unittest.mock
"""
self._assert_has_no_errors(code, checks.import_stock_mock)
+
+ def test_check_set_daemon(self):
+ code = """
+ self.setDaemon(True)
+ worker.setDaemon(True)
+ self._event_thread.setDaemon(True)
+ mythread.setDaemon(False)
+ self.thread.setDaemon(1)
+ """
+ errors = [(x + 1, 0, 'N372') for x in range(5)]
+ self._assert_has_errors(
+ code, checks.check_set_daemon, expected_errors=errors)
+
+ code = """
+ self.setDaemon = True
+ worker.setDaemonFlag(True)
+ self._event_thread.resetDaemon(True)
+ self.set.Daemon(True)
+ self.thread.setdaemon(True)
+ """
+ self._assert_has_no_errors(code, checks.check_set_daemon)
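
The new hacking test expects a checks.check_set_daemon rule that flags N372 on .setDaemon( calls while ignoring look-alikes such as setDaemonFlag( or setdaemon(. A plausible regex-based sketch in the usual hacking-check shape (the wording of the real check in nova/hacking/checks.py may differ):

    import re

    set_daemon_re = re.compile(r'\.setDaemon\(')


    def check_set_daemon(logical_line):
        """N372: Don't use the setDaemon method; set the daemon attribute."""
        if set_daemon_re.search(logical_line):
            yield (0, "N372: Don't use the setDaemon method. "
                      "Set the daemon attribute instead.")
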
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index 871e836d87..752b872381 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -303,10 +303,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
- self.non_admin_context = context.RequestContext('fake', 'fake',
- roles=['member'])
- self.admin_context = context.RequestContext('fake', 'fake', True,
- roles=['admin', 'member'])
+ self.non_admin_context = context.RequestContext(
+ 'fake', 'fake', roles=['member', 'reader'])
+ self.admin_context = context.RequestContext(
+ 'fake', 'fake', True, roles=['admin', 'member', 'reader'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
@@ -387,6 +387,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-hypervisors:search",
"os_compute_api:os-hypervisors:servers",
"os_compute_api:limits:other_project",
+"os_compute_api:os-flavor-access",
)
self.admin_or_owner_rules = (
@@ -440,7 +441,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete:restore",
"os_compute_api:os-deferred-delete:force",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ips:add",
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index 3fe56013bd..40a914b5f7 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -214,20 +214,20 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client(self, mock_get, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -253,21 +253,21 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client_profiler_enabled(self, mock_client, mock_ser,
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -432,11 +432,11 @@ class TestProfilerRequestContextSerializer(test.NoDBTestCase):
class TestClientRouter(test.NoDBTestCase):
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
@@ -444,7 +444,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
# verify a client was created by ClientRouter
- mock_rpcclient.assert_called_once_with(
+ mock_get.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
@@ -452,11 +452,11 @@ class TestClientRouter(test.NoDBTestCase):
# verify cell client was returned
self.assertEqual(cell_client, client)
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance_untargeted(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance_untargeted(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
@@ -464,7 +464,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
- self.assertFalse(mock_rpcclient.called)
+ self.assertFalse(mock_get.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
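
The test_rpc.py changes above track oslo.messaging's move from instantiating RPCClient directly to the get_rpc_client() factory. The call pattern the updated assertions expect looks roughly like this (a sketch of nova.rpc.get_client, not its exact code):

    import oslo_messaging as messaging


    def get_client(transport, target, serializer, version_cap=None):
        # obtain the client via the factory instead of RPCClient(...)
        return messaging.get_rpc_client(
            transport, target, version_cap=version_cap,
            call_monitor_timeout=None, serializer=serializer)
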
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index 9fb6fa1c40..acc1aeca7f 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -128,7 +128,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
# init_host is called before any service record is created
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host, self.binary)
mock_create.assert_called_once_with()
@@ -186,7 +186,7 @@ class ServiceTestCase(test.NoDBTestCase):
mock_create.side_effect = ex
serv.manager = mock_manager
self.assertRaises(test.TestingException, serv.start)
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(None)
mock_get_by_host_and_binary.assert_has_calls([
mock.call(mock.ANY, self.host, self.binary),
mock.call(mock.ANY, self.host, self.binary)])
@@ -216,7 +216,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host,
self.binary)
@@ -241,7 +241,8 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(
+ mock_svc_get_by_host_and_binary.return_value)
serv.stop()
serv.manager.cleanup_host.assert_called_with()
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 62005de525..135558e145 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -40,6 +40,7 @@ class FakeMount(object):
class APITestCase(test.NoDBTestCase):
+ @mock.patch('nova.virt.disk.vfs.guestfs.VFSGuestFS', new=mock.Mock())
def test_can_resize_need_fs_type_specified(self):
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 6ac7ca464e..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -935,6 +935,48 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected = {
'CUSTOM_IRON_NFV': {
'total': 1,
+ 'reserved': 1,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ mock_nfc.assert_called_once_with(mock.sentinel.nodename)
+ mock_nr.assert_called_once_with(mock_nfc.return_value)
+ mock_res_used.assert_called_once_with(mock_nfc.return_value)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
+ result = self.ptree.data(mock.sentinel.nodename).inventory
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_used', return_value=True)
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable', return_value=False)
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
+ def test_update_provider_tree_with_rc_occupied_workaround(self,
+ mock_nfc, mock_nr, mock_res_unavail, mock_res_used):
+ """Ensure that when a node is used, we report the inventory matching
+ the consumed resources.
+ """
+ self.flags(skip_reserve_in_use_ironic_nodes=True,
+ group="workarounds")
+ mock_nr.return_value = {
+ 'vcpus': 24,
+ 'vcpus_used': 24,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 1024,
+ 'local_gb': 100,
+ 'local_gb_used': 100,
+ 'resource_class': 'iron-nfv',
+ }
+
+ self.driver.update_provider_tree(self.ptree, mock.sentinel.nodename)
+
+ expected = {
+ 'CUSTOM_IRON_NFV': {
+ 'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
@@ -945,7 +987,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).inventory
self.assertEqual(expected, result)
@@ -1016,7 +1058,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(), result)
@@ -1048,7 +1090,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(traits), result)
@@ -2500,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2532,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2540,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
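
The two inventory tests above encode a simple reservation rule for in-use Ironic nodes: reserve the single unit of the node's custom resource class unless the [workarounds]skip_reserve_in_use_ironic_nodes flag is set. A compact illustration of that rule (names are mine, not the driver's actual code):

    def reserved_units(node_in_use, skip_reserve_workaround):
        """How many units of the node resource class to reserve."""
        if node_in_use and not skip_reserve_workaround:
            return 1
        return 0

    # e.g. reserved_units(True, False) == 1 matches the first test above,
    #      reserved_units(True, True) == 0 matches the workaround test
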
diff --git a/nova/tests/unit/virt/libvirt/cpu/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..b5bcb762f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ # Create a fake instance with two pinned CPUs but only one is on the
+ # dedicated set
+ numa_topology = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(cpu_pinning_raw={'0': '0', '2': '2'}),
+ ])
+ self.fake_inst = objects.Instance(numa_topology=numa_topology)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_online(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).online calls set_online(i)
+ mock_online.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_up_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_high_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'performance')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped(self, mock_online):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_up(self.fake_inst)
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped_if_standard_instance(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_up(objects.Instance(numa_topology=None))
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, core(i).online = False calls set_offline(i)
+ mock_offline.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, core(i).set_low_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'powersave')
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down(self.fake_inst)
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped_if_standard_instance(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_down(objects.Instance(numa_topology=None))
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_offline.assert_has_calls([mock.call(0), mock.call(1)])
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_all_dedicated_cpus_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+        # All dedicated CPUs get the powersave governor
+ mock_set_governor.assert_has_calls([mock.call(0, 'powersave'),
+ mock.call(1, 'powersave')])
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down_all_dedicated_cpus()
+ mock_offline.assert_not_called()
+
+ def test_power_down_all_dedicated_cpus_wrong_config(self):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set=None, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.power_down_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_governor(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ mock_get_governor.return_value = 'performance'
+ mock_get_online.side_effect = (True, False)
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_cpu_state(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ mock_get_online.return_value = True
+ mock_get_governor.side_effect = ('powersave', 'performance')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
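
TestAPI above describes the behavior of the new libvirt CPU power-management helpers: power_up/power_down only touch pinned CPUs that fall inside [compute]cpu_dedicated_set, and cpu_power_management_strategy chooses between onlining/offlining cores and switching governors. A simplified sketch of the power_down flow implied by those tests (the real nova.virt.libvirt.cpu.api is structured differently; get_cpu_dedicated_set is assumed from nova.virt.hardware):

    import nova.conf
    from nova.virt import hardware
    from nova.virt.libvirt.cpu import core

    CONF = nova.conf.CONF


    def power_down(instance):
        # no-op unless the operator opted in and the instance has pinning
        if not CONF.libvirt.cpu_power_management:
            return
        if instance.numa_topology is None:
            return
        dedicated = hardware.get_cpu_dedicated_set() or set()
        pinned = set()
        for cell in instance.numa_topology.cells:
            pinned |= set(cell.cpu_pinning.values())
        # only cores that are both pinned and dedicated are powered down
        for pcpu in pinned & dedicated:
            if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
                core.set_offline(pcpu)
            else:
                core.set_governor(pcpu, CONF.libvirt.cpu_power_governor_low)
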
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
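Outside of these mocks, the helpers exercised above boil down to plain sysfs reads and writes. The sketch below is an illustration only: the path template is the conventional Linux layout and is assumed here, and the plain open() calls stand in for the module's filesystem helpers, which perform writes through privsep.

CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'  # assumed layout


def _read(path):
    with open(path) as f:
        return f.read().strip()


def _write(path, data):
    with open(path, 'w') as f:
        f.write(data)


def get_online(core):
    # cpu0 typically has no 'online' file; a missing file is treated as
    # online, mirroring test_get_online_not_exists above.
    try:
        return _read(CPU_PATH_TEMPLATE % {'core': core} + '/online') == '1'
    except FileNotFoundError:
        return True


def set_offline(core):
    _write(CPU_PATH_TEMPLATE % {'core': core} + '/online', '0')
    return not get_online(core)


def set_governor(core, governor):
    path = CPU_PATH_TEMPLATE % {'core': core} + '/cpufreq/scaling_governor'
    _write(path, governor)
    return _read(path)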
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index c4c9359dd8..3d0b5ae685 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -1537,7 +1537,7 @@ class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
- def test_config_graphics(self):
+ def test_config_graphics_vnc(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
@@ -1549,6 +1549,30 @@ class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
+ def test_config_graphics_spice(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "spice"
+ obj.autoport = False
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ obj.image_compression = "auto_glz"
+ obj.jpeg_compression = "auto"
+ obj.zlib_compression = "always"
+ obj.playback_compression = True
+ obj.streaming_mode = "filter"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="spice" autoport="no" keymap="en_US" listen="127.0.0.1">
+ <image compression="auto_glz"/>
+ <jpeg compression="auto"/>
+ <zlib compression="always"/>
+ <playback compression="on"/>
+ <streaming mode="filter"/>
+ </graphics>
+ """)
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
@@ -1591,7 +1615,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
</hostdev>
"""
- def test_config_guest_hosdev_pci(self):
+ def test_config_guest_hostdev_pci(self):
hostdev = config.LibvirtConfigGuestHostdevPCI()
hostdev.domain = "1234"
hostdev.bus = "11"
@@ -1600,7 +1624,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
xml = hostdev.to_xml()
self.assertXmlEqual(self.expected, xml)
- def test_parse_guest_hosdev_pci(self):
+ def test_parse_guest_hostdev_pci(self):
xmldoc = self.expected
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
@@ -1612,7 +1636,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
self.assertEqual(obj.slot, '0x22')
self.assertEqual(obj.function, '0x3')
- def test_parse_guest_hosdev_usb(self):
+ def test_parse_guest_hostdev_usb(self):
xmldoc = """<hostdev mode='subsystem' type='usb'>
<source startupPolicy='optional'>
<vendor id='0x1234'/>
@@ -3181,6 +3205,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
config.LibvirtConfigNodeDeviceMdevInformation)
self.assertEqual("nvidia-11", obj.mdev_information.type)
self.assertEqual(12, obj.mdev_information.iommu_group)
+ self.assertIsNone(obj.mdev_information.uuid)
+
+ def test_config_mdev_device_uuid(self):
+ xmlin = """
+ <device>
+ <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name>
+ <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path>
+ <parent>pci_0000_41_00_0</parent>
+ <driver>
+ <name>vfio_mdev</name>
+ </driver>
+ <capability type='mdev'>
+ <type id='nvidia-442'/>
+ <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid>
+ <iommuGroup number='57'/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+ self.assertIsInstance(obj.mdev_information,
+ config.LibvirtConfigNodeDeviceMdevInformation)
+ self.assertEqual("nvidia-442", obj.mdev_information.type)
+ self.assertEqual(57, obj.mdev_information.iommu_group)
+ self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a",
+ obj.mdev_information.uuid)
def test_config_vdpa_device(self):
xmlin = """
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index fee87d3bb5..2b58c7df8b 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -817,6 +817,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Driver capabilities for 'supports_socket_pci_numa_affinity' "
"is invalid",
)
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption'],
+ "Driver capabilities for 'supports_ephemeral_encryption' "
+ "is invalid",
+ )
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption_luks'],
+ "Driver capabilities for 'supports_ephemeral_encryption_luks' "
+ " is invalid",
+ )
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
@@ -1320,7 +1330,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_model(self, mocked_compare):
mocked_compare.side_effect = (2, 0)
self.flags(cpu_mode="custom",
@@ -1333,6 +1344,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_register_all_undefined_instance_details',
new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ def test__check_cpu_compatibility_skip_compare_at_init(
+ self, mocked_compare
+ ):
+ self.flags(group='workarounds', skip_cpu_compare_at_startup=True)
+ self.flags(cpu_mode="custom",
+ cpu_models=["Icelake-Server-noTSX"],
+ cpu_model_extra_flags = ["-mpx"],
+ group="libvirt")
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+ mocked_compare.assert_not_called()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_with_flag(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1341,9 +1368,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["qemu64"],
cpu_model_extra_flags = ["avx", "avx2"],
@@ -1352,11 +1380,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
# here, and in the surrounding similar tests, the non-zero error
# code in the compareCPU() side effect indicates error
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["Broadwell-noTSX"],
cpu_model_extra_flags = ["a v x"],
@@ -1365,11 +1394,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_enabled_and_disabled_flags(
self, mocked_compare
):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(
cpu_mode="custom",
cpu_models=["Cascadelake-Server"],
@@ -1822,6 +1852,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_guest.set_user_password.assert_called_once_with("root", "123")
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ def test_qemu_announce_self(self, mock_get_guest):
+ # Enable the workaround; the retry count defaults to 3
+ self.flags(enable_qemu_monitor_announce_self=True, group='workarounds')
+
+ mock_guest = mock.Mock(spec=libvirt_guest.Guest)
+ mock_get_guest.return_value = mock_guest
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._qemu_monitor_announce_self(mock_guest)
+
+ # Ensure that 3 calls are made, matching the default of 3 for the
+ # enable_qemu_monitor_announce_self_retries option
+ mock_guest.announce_self.assert_any_call()
+ self.assertEqual(3, mock_guest.announce_self.call_count)
+
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@@ -3356,7 +3402,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(
"Memory encryption requested by hw:mem_encryption extra spec in "
"m1.fake flavor but image fake_image doesn't have "
- "'hw_firmware_type' property set to 'uefi'", str(exc))
+ "'hw_firmware_type' property set to 'uefi' or volume-backed "
+ "instance was requested", str(exc))
def test_sev_enabled_host_extra_spec_no_machine_type(self):
exc = self.assertRaises(exception.InvalidMachineType,
@@ -5793,6 +5840,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'vnc')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
@@ -5823,6 +5875,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, 'vnc')
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
def test_get_guest_config_with_spice_and_tablet(self):
@@ -5859,6 +5916,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'spice')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@@ -5918,8 +5980,57 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[3].type, 'spicevmc')
self.assertEqual(cfg.devices[4].type, "spice")
+ self.assertIsNone(cfg.devices[4].image_compression)
+ self.assertIsNone(cfg.devices[4].jpeg_compression)
+ self.assertIsNone(cfg.devices[4].zlib_compression)
+ self.assertIsNone(cfg.devices[4].playback_compression)
+ self.assertIsNone(cfg.devices[4].streaming_mode)
self.assertEqual(cfg.devices[5].type, video_type)
+ def test_get_guest_config_with_spice_compression(self):
+ self.flags(enabled=False, group='vnc')
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ image_compression='auto_lz',
+ jpeg_compression='never',
+ zlib_compression='always',
+ playback_compression=False,
+ streaming_mode='all',
+ server_listen='10.0.0.1',
+ group='spice')
+ self.flags(pointer_model='usbtablet')
+
+ cfg = self._get_guest_config_with_graphics()
+
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestUSBHostController)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, 'spice')
+ self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
+ self.assertEqual(cfg.devices[3].image_compression, 'auto_lz')
+ self.assertEqual(cfg.devices[3].jpeg_compression, 'never')
+ self.assertEqual(cfg.devices[3].zlib_compression, 'always')
+ self.assertFalse(cfg.devices[3].playback_compression)
+ self.assertEqual(cfg.devices[3].streaming_mode, 'all')
+
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@@ -9144,6 +9255,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([0, 1, 2, 3]))
+ def test_get_pcpu_available_for_power_mgmt(self, get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set and CPU power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='2-3', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ pcpus = drvr._get_pcpu_available()
+ self.assertEqual(set([2, 3]), pcpus)
+
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([4, 5]))
+ def test_get_pcpu_available__cpu_dedicated_set_invalid_for_pm(self,
+ get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set but is invalid when CPU power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='4-6', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set([0, 1, 2, 3]))
def test_get_vcpu_available(self, get_online_cpus):
@@ -9244,6 +9383,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta))
mock_fsthaw.assert_called_once_with()
+ def test_set_quiesced_agent_connection_fails(self):
+ # This is required to mock the guest host
+ self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
+
+ with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
+ error = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "QEMU guest agent is not connected",
+ error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
+
+ mock_fsfreeze.side_effect = error
+ mock_fsfreeze.error_code = error.get_error_code()
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes", }})
+ self.assertRaises(exception.InstanceQuiesceFailed,
+ drvr._set_quiesced, self.context, instance, image_meta, True)
+
def test_create_snapshot_metadata(self):
base = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
@@ -11126,7 +11285,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -11165,7 +11324,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file,
):
@@ -11205,7 +11364,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -11243,7 +11402,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file,
):
@@ -11275,7 +11434,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU',
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file,
@@ -11383,7 +11542,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file,
):
@@ -11424,7 +11583,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file,
):
@@ -11450,7 +11609,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(return_value.dst_wants_file_backed_memory)
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
@@ -11486,7 +11645,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for vif in result.vifs:
self.assertTrue(vif.supports_os_vif_delegation)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
@@ -11496,7 +11655,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
@@ -11533,7 +11692,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_AARCH64_CPU_COMPARE))
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
def test_compare_cpu_host_aarch64(self,
mock_compare,
mock_get_libversion,
@@ -11556,7 +11715,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_compare.assert_called_once_with(caps.host.cpu.to_xml())
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
@@ -11575,7 +11734,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
@@ -11587,7 +11746,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
jsonutils.dumps(_fake_cpu_info),
instance)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
@@ -14018,6 +14177,85 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main_monitoring_failed(self):
self._test_live_migration_main(mon_side_effect=Exception)
+ @mock.patch.object(host.Host, "get_connection", new=mock.Mock())
+ @mock.patch.object(utils, "spawn", new=mock.Mock())
+ @mock.patch.object(host.Host, "get_guest")
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths")
+ def _test_live_migration_monitor_job_stats_exception(
+ self, exc, mock_copy_disk_paths, mock_get_guest, expect_success=True
+ ):
+ # Verify behavior when various exceptions are raised inside of
+ # Guest.get_job_info() during live migration monitoring.
+ mock_domain = mock.Mock(fakelibvirt.virDomain)
+ guest = libvirt_guest.Guest(mock_domain)
+ mock_get_guest.return_value = guest
+
+ # First, raise the exception from jobStats(), then return "completed"
+ # to make sure we exit the monitoring loop.
+ guest._domain.jobStats.side_effect = [
+ exc,
+ {'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED},
+ ]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ post_method = mock.Mock()
+ migrate_data = mock.Mock()
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_paths.return_value = disks_to_copy
+
+ func = drvr._live_migration
+ args = (self.context, instance, mock.sentinel.dest, post_method,
+ mock.sentinel.recover_method, mock.sentinel.block_migration,
+ migrate_data)
+
+ if expect_success:
+ func(*args)
+ post_method.assert_called_once_with(
+ self.context, instance, mock.sentinel.dest,
+ mock.sentinel.block_migration, migrate_data
+ )
+ else:
+ actual_exc = self.assertRaises(
+ fakelibvirt.libvirtError, func, *args)
+ self.assertEqual(exc, actual_exc)
+
+ def test_live_migration_monitor_job_stats_no_domain(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'no domain',
+ error_code=fakelibvirt.VIR_ERR_NO_DOMAIN
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_op_invalid(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'operation invalid',
+ error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_no_ram_info_set(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'internal error',
+ error_message='migration was active, but no RAM info was set',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_internal_error(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ 'some other internal error',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=False)
+
@mock.patch('os.path.exists', return_value=False)
@mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
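Taken together, the four job-stats tests above encode a filtering policy for errors raised while polling migration progress: a vanished domain, an invalid-operation race, or the specific "no RAM info" internal error are tolerated, while any other internal error aborts the migration. A standalone sketch of that policy (assuming the python libvirt bindings are importable; this is not the driver's actual code path):

import libvirt

TOLERATED_CODES = (
    libvirt.VIR_ERR_NO_DOMAIN,
    libvirt.VIR_ERR_OPERATION_INVALID,
)
NO_RAM_INFO_MSG = 'migration was active, but no RAM info was set'


def job_stats_error_is_fatal(exc):
    """Return True if a jobStats() libvirtError should abort monitoring."""
    code = exc.get_error_code()
    if code in TOLERATED_CODES:
        return False
    if (code == libvirt.VIR_ERR_INTERNAL_ERROR and
            NO_RAM_INFO_MSG in (exc.get_error_message() or '')):
        return False
    return True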
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 3afd6c139d..631b10d81a 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -1052,6 +1052,12 @@ Active: 8381604 kB
'iowait': 6121490000000},
stats)
+ @mock.patch.object(fakelibvirt.virConnect, "getCPUMap")
+ def test_get_available_cpus(self, mock_map):
+ mock_map.return_value = (4, [True, True, False, False], None)
+ result = self.host.get_available_cpus()
+ self.assertEqual(result, {0, 1, 2, 3})
+
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
fake_dom_xml = """
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 0b80bde49f..c648108f56 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -103,19 +103,23 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
+ @mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
def _test_create_image(
self, path, disk_format, disk_size, mock_info, mock_execute,
- backing_file=None
+ mock_ntf, backing_file=None, encryption=None
):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
cluster_size=mock.sentinel.cluster_size,
)
+ fh = mock_ntf.return_value.__enter__.return_value
libvirt_utils.create_image(
- path, disk_format, disk_size, backing_file=backing_file)
+ path, disk_format, disk_size, backing_file=backing_file,
+ encryption=encryption,
+ )
cow_opts = []
@@ -130,9 +134,32 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
f'cluster_size={mock.sentinel.cluster_size}',
]
+ encryption_opts = []
+
+ if encryption:
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={fh.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
expected_args = (
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
- disk_format, *cow_opts, path,
+ disk_format, *cow_opts, *encryption_opts, path,
)
if disk_size is not None:
expected_args += (disk_size,)
@@ -159,6 +186,16 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
backing_file=mock.sentinel.backing_file,
)
+ def test_create_image_encryption(self):
+ encryption = {
+ 'secret': 'a_secret',
+ 'format': 'luks',
+ }
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ encryption=encryption,
+ )
+
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
'default_eph_format': None,
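The encryption branch asserted above corresponds to a qemu-img invocation that feeds the key material in through a temporary secret file. The snippet below is a rough standalone rendering of that command line under the same option set as expected_args; it is not nova's create_image() implementation.

import subprocess
import tempfile


def create_encrypted_qcow2(path, size, secret, fmt='luks'):
    with tempfile.NamedTemporaryFile(mode='w') as f:
        f.write(secret)
        f.flush()
        cmd = [
            'qemu-img', 'create', '-f', 'qcow2',
            '--object', f'secret,id=sec,file={f.name}',
            '-o', 'encrypt.key-secret=sec',
            '-o', f'encrypt.format={fmt}',
            '-o', 'encrypt.cipher-alg=aes-256',
            '-o', 'encrypt.cipher-mode=xts',
            '-o', 'encrypt.hash-alg=sha256',
            '-o', 'encrypt.iter-time=2000',
            '-o', 'encrypt.ivgen-alg=plain64',
            '-o', 'encrypt.ivgen-hash-alg=sha256',
            path, str(size),
        ]
        # The secret file must still exist when qemu-img reads it.
        subprocess.run(cmd, check=True)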
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 26ec198f08..753ee41550 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -2638,45 +2638,45 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
def test_get_fitting_success_no_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3)
+ self.host, self.instance3, {})
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
def test_get_fitting_success_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance.cells[0].id)
def test_get_fitting_fails_no_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance)
def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance2)
def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
self.assertEqual(2, fitted_instance2.cells[0].id)
@@ -2691,7 +2691,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
mock_supports.assert_called_once_with(
@@ -2708,7 +2708,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsNone(fitted_instance)
mock_supports.assert_has_calls([
@@ -2725,6 +2725,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
'support_requests', return_value= True):
fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsInstance(fitted_instance1,
@@ -2740,6 +2741,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsNone(fitted_instance1)
@@ -2758,7 +2760,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# ...therefore an instance without a PCI device should get host cell 2
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
# TODO(sfinucan): We should be comparing this against the HOST cell
self.assertEqual(2, instance_topology.cells[0].id)
@@ -2768,7 +2770,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# ...therefore an instance without a PCI device should get host cell 1
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
self.assertEqual(1, instance_topology.cells[0].id)
@@ -3895,7 +3897,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3933,7 +3935,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(1,))
@@ -3971,7 +3973,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fit(self):
@@ -4014,7 +4016,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -4069,7 +4071,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
@@ -4114,7 +4116,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
@@ -4148,7 +4150,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([4, 5]), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_cpu_pinning_usage_from_instances(self):
@@ -4788,7 +4790,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4802,7 +4804,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4816,7 +4818,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4830,7 +4832,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1, 2, 4]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_multi_nodes_isolate(self):
@@ -4847,7 +4849,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2}, inst_topo.cells[1].cpu_pinning)
@@ -4867,7 +4869,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
# The guest NUMA node 0 is requesting 2pCPUs + 1 additional
# pCPU for emulator threads, the host can't handle the
# request.
@@ -4887,7 +4889,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1, 2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2, 2: 3}, inst_topo.cells[1].cpu_pinning)
@@ -4962,7 +4964,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0, 1: 2}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([4]), inst_topo.cells[0].cpuset_reserved)
@@ -4992,7 +4994,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5021,7 +5023,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
if policy:
inst_topo.emulator_threads_policy = policy
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
return inst_topo
def test_mixed_instance_not_define(self):
@@ -5078,7 +5080,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 3}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5107,7 +5109,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5362,7 +5364,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
expected_error = (
"Memory encryption requested by %(requesters)s but image "
"%(image_name)s doesn't have 'hw_firmware_type' property "
- "set to 'uefi'"
+ "set to 'uefi' or volume-backed instance was requested"
)
def _test_encrypted_memory_support_no_uefi(self, enc_extra_spec,
@@ -5489,6 +5491,25 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
(self.flavor_name, self.image_id)
)
+ def test_encrypted_memory_support_flavor_for_volume(self):
+ extra_specs = {'hw:mem_encryption': True}
+
+ flavor = objects.Flavor(name=self.flavor_name,
+ extra_specs=extra_specs)
+ # The following image_meta is typical for a root Cinder volume
+ image_meta = objects.ImageMeta.from_dict({
+ 'min_disk': 0,
+ 'min_ram': 0,
+ 'properties': {},
+ 'size': 0,
+ 'status': 'active'})
+ # Confirm that exception.FlavorImageConflict is raised when a
+ # flavor with the hw:mem_encryption flag is used to create a
+ # volume-backed instance
+ self.assertRaises(exception.FlavorImageConflict,
+ hw.get_mem_encryption_constraint, flavor,
+ image_meta)
+
class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
@@ -5896,7 +5917,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
def test_sort_host_numa_cell_num_equal_instance_cell_num(self):
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance0)
+ self.host, self.instance0, {})
self.assertInstanceNUMAcellOrder([0, 1, 2, 3], instance_topology)
def test_sort_no_pci_stats_no_shared_cpu_policy(self):
@@ -5905,14 +5926,14 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
True,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance2)
+ self.host, self.instance2, {})
self.assertInstanceNUMAcellOrder([0, 1, 3], instance_topology)
CONF.set_override(
'packing_host_numa_cells_allocation_strategy',
False,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance2)
+ self.host, self.instance2, {})
self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
def test_sort_no_pci_stats_shared_cpu_policy(self):
@@ -5921,14 +5942,14 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
True,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertInstanceNUMAcellOrder([0, 1, 2], instance_topology)
CONF.set_override(
'packing_host_numa_cells_allocation_strategy',
False,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertInstanceNUMAcellOrder([3, 1, 2], instance_topology)
def test_sort_pci_stats_pci_req_no_shared_cpu_policy(self):
@@ -5941,6 +5962,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
pci_reqs = [pci_request]
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 0, 3], instance_topology)
@@ -5950,6 +5972,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 2, 3], instance_topology)
@@ -5964,6 +5987,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
pci_reqs = [pci_request]
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 0, 2], instance_topology)
@@ -5973,6 +5997,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 3, 2], instance_topology)
@@ -5984,6 +6009,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([0, 3, 2], instance_topology)
CONF.set_override(
@@ -5992,6 +6018,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
@@ -6002,6 +6029,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([0, 2, 3], instance_topology)
CONF.set_override(
@@ -6010,5 +6038,6 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([3, 2, 0], instance_topology)
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 58581d93ba..62a61c1e8b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -16,6 +16,8 @@ import os
from unittest import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
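Both new tests exercise the same allow-list policy: the VMDK create-type reported by qemu-img must appear in [compute]vmdk_allowed_types, and an empty list allows nothing. A condensed restatement of that check (nova's check_vmdk_image() raises ImageUnacceptable rather than returning a boolean):

def vmdk_create_type_allowed(qemu_img_info, allowed_types):
    # format-specific data as parsed by oslo's QemuImgInfo from json output
    data = (qemu_img_info.format_specific or {}).get('data', {})
    create_type = data.get('create-type')
    return create_type is not None and create_type in allowed_types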
diff --git a/nova/tests/unit/virt/test_netutils.py b/nova/tests/unit/virt/test_netutils.py
index de3f451351..fa0e16df19 100644
--- a/nova/tests/unit/virt/test_netutils.py
+++ b/nova/tests/unit/virt/test_netutils.py
@@ -17,6 +17,17 @@ from nova.virt import netutils
class TestNetUtilsTestCase(test.NoDBTestCase):
+
+ def _get_fake_instance_nw_info(self, num_networks, dhcp_server, mtu):
+ network_info = fake_network.fake_get_instance_nw_info(self,
+ num_networks)
+ for vif in network_info:
+ for subnet in vif['network']['subnets']:
+ subnet['meta']['dhcp_server'] = dhcp_server
+ vif['network']['meta']['mtu'] = mtu
+
+ return network_info
+
def test_get_cached_vifs_with_vlan_no_nw_info(self):
# Make sure that an empty dictionary will be returned when
# nw_info is None
@@ -39,3 +50,15 @@ class TestNetUtilsTestCase(test.NoDBTestCase):
expected = {'fa:16:3e:d1:28:e4': '2145'}
self.assertEqual(expected,
netutils.get_cached_vifs_with_vlan(network_info))
+
+ def test__get_link_mtu(self):
+ network_info_dhcp = self._get_fake_instance_nw_info(
+ 1, '192.168.0.100', 9000)
+ network_info_no_dhcp = self._get_fake_instance_nw_info(
+ 1, None, 9000)
+
+ for vif in network_info_dhcp:
+ self.assertIsNone(netutils._get_link_mtu(vif))
+
+ for vif in network_info_no_dhcp:
+ self.assertEqual(9000, netutils._get_link_mtu(vif))
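The two assertions above describe the rule being tested: if any subnet on the network advertises a DHCP server, the link MTU is left unset (None) so DHCP can supply it; otherwise the network's configured MTU is used. A minimal restatement of that rule, not necessarily every branch of netutils._get_link_mtu():

def get_link_mtu(vif):
    for subnet in vif['network']['subnets']:
        if subnet['meta'].get('dhcp_server'):
            # DHCP will advertise the MTU; don't set it statically.
            return None
    return vif['network']['meta'].get('mtu')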
diff --git a/nova/tests/unit/virt/test_node.py b/nova/tests/unit/virt/test_node.py
new file mode 100644
index 0000000000..668b762520
--- /dev/null
+++ b/nova/tests/unit/virt/test_node.py
@@ -0,0 +1,142 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils.fixture import uuidsentinel as uuids
+import testtools
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.virt import node
+
+CONF = cfg.CONF
+
+
+# NOTE(danms): We do not inherit from test.TestCase because we need
+# our node methods not stubbed out in order to exercise them.
+class TestNodeIdentity(testtools.TestCase):
+ def flags(self, **kw):
+ """Override flag variables for a test."""
+ group = kw.pop('group', None)
+ for k, v in kw.items():
+ CONF.set_override(k, v, group)
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.identity_file = os.path.join(self.tempdir, node.COMPUTE_ID_FILE)
+ self.fake_config_files = ['%s/etc/nova.conf' % self.tempdir,
+ '%s/etc/nova/nova.conf' % self.tempdir,
+ '%s/opt/etc/nova/nova.conf' % self.tempdir]
+ for fn in self.fake_config_files:
+ os.makedirs(os.path.dirname(fn))
+ self.flags(state_path=self.tempdir,
+ config_file=self.fake_config_files)
+ node.LOCAL_NODE_UUID = None
+
+ def test_generate_local_node_uuid(self):
+ node_uuid = uuids.node
+ node.write_local_node_uuid(node_uuid)
+
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'anything')
+ self.assertIn(
+ 'Identity file %s appeared unexpectedly' % self.identity_file,
+ str(e))
+
+ def test_generate_local_node_uuid_unexpected_open_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_open.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_generate_local_node_uuid_unexpected_write_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_write = mock_open.return_value.__enter__.return_value.write
+ mock_write.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_get_local_node_uuid_simple_exists(self):
+ node_uuid = uuids.node
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_exists_whitespace(self):
+ node_uuid = uuids.node
+ # Make sure we strip whitespace from the file contents
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ ' %s \n' % node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_generate(self):
+ self.assertIsNone(node.LOCAL_NODE_UUID)
+ node_uuid1 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid1, node.LOCAL_NODE_UUID)
+ node_uuid2 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid2, node.LOCAL_NODE_UUID)
+
+ # Make sure we got the same thing each time, and that it's a
+ # valid uuid. Since we provided no uuid, it must have been
+ # generated the first time and read/returned the second.
+ self.assertEqual(node_uuid1, node_uuid2)
+ uuid.UUID(node_uuid1)
+
+ # Try to read it directly to make sure the file was really
+ # created and with the right value.
+ self.assertEqual(node_uuid1, node.read_local_node_uuid())
+
+ def test_get_local_node_uuid_two(self):
+ node_uuid = uuids.node
+
+ # Write the uuid to two of our locations
+ for cf in (self.fake_config_files[0], self.fake_config_files[1]):
+ open(os.path.join(os.path.dirname(cf),
+ node.COMPUTE_ID_FILE), 'w').write(node_uuid)
+
+ # Make sure we got the expected uuid and that no exceptions
+ # were raised about the files disagreeing
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_two_mismatch(self):
+ node_uuids = [uuids.node1, uuids.node2]
+
+ # Write a different uuid to each file
+ for id, fn in zip(node_uuids, self.fake_config_files):
+ open(os.path.join(
+ os.path.dirname(fn),
+ node.COMPUTE_ID_FILE), 'w').write(id)
+
+ # Make sure we get an error that identifies the mismatching
+ # file with its uuid, as well as what we expected to find
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.get_local_node_uuid)
+ expected = ('UUID %s in %s does not match %s' % (
+ node_uuids[1],
+ os.path.join(os.path.dirname(self.fake_config_files[1]),
+ 'compute_id'),
+ node_uuids[0]))
+ self.assertIn(expected, str(e))
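The tests in this new file describe the lookup policy rather than its exact wiring: every compute_id file found along the search path must agree, and when none exists a fresh UUID is generated and persisted. The sketch below compresses that policy into one function; the search-path construction and exception types are simplified relative to nova.virt.node.

import os
import uuid

COMPUTE_ID_FILE = 'compute_id'


def get_local_node_uuid(search_dirs):
    found = {}
    for d in search_dirs:
        path = os.path.join(d, COMPUTE_ID_FILE)
        if os.path.exists(path):
            with open(path) as f:
                found[path] = f.read().strip()
    if len(set(found.values())) > 1:
        raise RuntimeError('compute_id files disagree: %s' % found)
    if found:
        return next(iter(found.values()))
    node_uuid = str(uuid.uuid4())
    with open(os.path.join(search_dirs[0], COMPUTE_ID_FILE), 'w') as f:
        f.write(node_uuid)
    return node_uuid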
diff --git a/nova/utils.py b/nova/utils.py
index 664056a09f..b5d45c58b5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -632,15 +632,13 @@ def _serialize_profile_info():
return trace_info
-def spawn(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn.
-
- This utility exists so that it can be stubbed for testing without
- interfering with the service spawns.
+def pass_context(runner, func, *args, **kwargs):
+ """Generalised passthrough method
- It will also grab the context from the threadlocal store and add it to
- the store on the new thread. This allows for continuity in logging the
- context when using this method to spawn a new thread.
+ It will grab the context from the threadlocal store and add it to
+ the store on the runner. This allows for continuity in logging the
+ context when using this method to spawn a new thread through the
+ runner function.
"""
_context = common_context.get_current()
profiler_info = _serialize_profile_info()
@@ -655,11 +653,11 @@ def spawn(func, *args, **kwargs):
profiler.init(**profiler_info)
return func(*args, **kwargs)
- return eventlet.spawn(context_wrapper, *args, **kwargs)
+ return runner(context_wrapper, *args, **kwargs)
-def spawn_n(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn_n.
+def spawn(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
@@ -668,25 +666,26 @@ def spawn_n(func, *args, **kwargs):
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
- _context = common_context.get_current()
- profiler_info = _serialize_profile_info()
- @functools.wraps(func)
- def context_wrapper(*args, **kwargs):
- # NOTE: If update_store is not called after spawn_n it won't be
- # available for the logger to pull from threadlocal storage.
- if _context is not None:
- _context.update_store()
- if profiler_info and profiler:
- profiler.init(**profiler_info)
- func(*args, **kwargs)
+ return pass_context(eventlet.spawn, func, *args, **kwargs)
+
+
+def spawn_n(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn_n.
+
+ This utility exists so that it can be stubbed for testing without
+ interfering with the service spawns.
- eventlet.spawn_n(context_wrapper, *args, **kwargs)
+ It will also grab the context from the threadlocal store and add it to
+ the store on the new thread. This allows for continuity in logging the
+ context when using this method to spawn a new thread.
+ """
+ pass_context(eventlet.spawn_n, func, *args, **kwargs)
def tpool_execute(func, *args, **kwargs):
"""Run func in a native thread"""
- tpool.execute(func, *args, **kwargs)
+ return pass_context(tpool.execute, func, *args, **kwargs)
def is_none_string(val):
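The hunk above folds the context-propagation logic that spawn(), spawn_n() and tpool_execute() shared into a single pass_context() helper. A minimal standalone sketch of the same pattern, with a plain threading runner standing in for eventlet/tpool (the names and the threadlocal stand-in are illustrative, not Nova code):

    import functools
    import threading

    _store = threading.local()  # stand-in for oslo.context's threadlocal store

    def pass_context(runner, func, *args, **kwargs):
        # Snapshot the caller's context before the runner hands execution off.
        ctx = getattr(_store, 'value', None)

        @functools.wraps(func)
        def context_wrapper(*a, **kw):
            # Re-install the snapshot inside the new execution unit so logging
            # keeps the same request context.
            _store.value = ctx
            return func(*a, **kw)

        return runner(context_wrapper, *args, **kwargs)

    def run_in_thread(fn, *args, **kwargs):
        # Any runner with a (callable, *args, **kwargs) signature fits here,
        # e.g. eventlet.spawn, eventlet.spawn_n or tpool.execute in Nova.
        t = threading.Thread(target=fn, args=args, kwargs=kwargs)
        t.start()
        return t

    _store.value = 'req-123'
    pass_context(run_in_thread, lambda: print('context:', _store.value))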
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 532ed1fa50..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -34,6 +34,7 @@ from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.virt import event as virtevent
+import nova.virt.node
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -1595,6 +1596,11 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
+
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 362bf89973..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -32,6 +32,7 @@ import fixtures
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
from nova.compute import power_state
@@ -48,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -160,8 +162,8 @@ class FakeDriver(driver.ComputeDriver):
self._host = host
# NOTE(gibi): this is unnecessary complex and fragile but this is
# how many current functional sample tests expect the node name.
- self._nodes = (['fake-mini'] if self._host == 'compute'
- else [self._host])
+ self._set_nodes(['fake-mini'] if self._host == 'compute'
+ else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
@@ -504,6 +506,12 @@ class FakeDriver(driver.ComputeDriver):
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+ # NOTE(danms): Because the fake driver runs on the same host
+ # in tests, potentially with multiple nodes, we need to
+ # control our node uuids. Make sure we return a unique and
+ # consistent uuid for each node we are responsible for to
+ # keep the persistent local node identity from taking over.
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -646,6 +654,10 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
+
def instance_on_disk(self, instance):
return False
@@ -764,7 +776,7 @@ class PredictableNodeUUIDDriver(SmallFakeDriver):
PredictableNodeUUIDDriver, self).get_available_resource(nodename)
# This is used in ComputeNode.update_from_virt_driver which is called
# from the ResourceTracker when creating a ComputeNode.
- resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename)
+ resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
return resources
@@ -1119,3 +1131,22 @@ class EphEncryptionDriverPLAIN(MediumFakeDriver):
FakeDriver.capabilities,
supports_ephemeral_encryption=True,
supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+ This behaves like a real virt driver from the perspective of its
+ nodes, with a stable nodename and use of the global node identity
+ stuff to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 271a719aa2..c8f8bb2481 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1213,10 +1213,13 @@ def _check_for_mem_encryption_requirement_conflicts(
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
+ # image_meta.name is not set if image object represents root
+ # Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {
'flavor_name': flavor.name,
'flavor_val': flavor_mem_enc_str,
- 'image_name': image_meta.name,
+ 'image_name': image_name,
'image_val': image_mem_enc,
}
raise exception.FlavorImageConflict(emsg % data)
@@ -1228,10 +1231,15 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
emsg = _(
"Memory encryption requested by %(requesters)s but image "
- "%(image_name)s doesn't have 'hw_firmware_type' property set to 'uefi'"
+ "%(image_name)s doesn't have 'hw_firmware_type' property set to "
+ "'uefi' or volume-backed instance was requested"
)
+ # image_meta.name is not set if image object represents root Cinder
+ # volume, for this case FlavorImageConflict should be raised, but
+ # image_meta.name can't be extracted.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {'requesters': " and ".join(requesters),
- 'image_name': image_meta.name}
+ 'image_name': image_name}
raise exception.FlavorImageConflict(emsg % data)
@@ -1260,12 +1268,14 @@ def _check_mem_encryption_machine_type(image_meta, machine_type=None):
if mach_type is None:
return
+ # image_meta.name is not set if image object represents root Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
# Could be something like pc-q35-2.11 if a specific version of the
# machine type is required, so do substring matching.
if 'q35' not in mach_type:
raise exception.InvalidMachineType(
mtype=mach_type,
- image_id=image_meta.id, image_name=image_meta.name,
+ image_id=image_meta.id, image_name=image_name,
reason=_("q35 type is required for SEV to work"))
@@ -2295,6 +2305,7 @@ def _numa_cells_support_network_metadata(
def numa_fit_instance_to_host(
host_topology: 'objects.NUMATopology',
instance_topology: 'objects.InstanceNUMATopology',
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
limits: ty.Optional['objects.NUMATopologyLimit'] = None,
pci_requests: ty.Optional['objects.InstancePCIRequests'] = None,
pci_stats: ty.Optional[stats.PciDeviceStats] = None,
@@ -2310,6 +2321,12 @@ def numa_fit_instance_to_host(
:param host_topology: objects.NUMATopology object to fit an
instance on
:param instance_topology: objects.InstanceNUMATopology to be fitted
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resource
+ for that RequestGroup. If it is None then it signals that the
+ InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param limits: objects.NUMATopologyLimits that defines limits
:param pci_requests: instance pci_requests
:param pci_stats: pci_stats for the host
@@ -2465,7 +2482,7 @@ def numa_fit_instance_to_host(
continue
if pci_requests and pci_stats and not pci_stats.support_requests(
- pci_requests, chosen_instance_cells):
+ pci_requests, provider_mapping, chosen_instance_cells):
continue
if network_metadata and not _numa_cells_support_network_metadata(
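For reference, this is the shape of the new provider_mapping argument described in the docstring above; the keys and UUIDs below are invented for illustration (passing None keeps the pre-scheduling behaviour, where the InstancePCIRequest objects carry their own mapping):

    # RequestGroup requester_id -> resource provider UUIDs that satisfy it.
    provider_mapping = {
        # e.g. an InstancePCIRequest.request_id for a flavor-based PCI request
        'b1a2c3d4-e5f6-4a5b-8c9d-0e1f2a3b4c5d': [
            '0ea61a93-a2f6-4c07-9e0a-000000000001',
        ],
        # e.g. a Neutron port UUID for a port-based resource request
        '7c6f1f4e-9f2a-4d3b-b2aa-000000000002': [
            '0ea61a93-a2f6-4c07-9e0a-000000000003',
        ],
    }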
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not types:
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 7496db5a7c..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -397,6 +397,18 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
+
+ # It's possible this node has just moved from deleting
+ # to cleaning. Placement will update the inventory
+ # as all reserved, but this instance might have got here
+ # before that happened and after the previous allocation
+ # got deleted. We trigger a re-schedule to another node.
+ if (self._node_resources_used(node) or
+ self._node_resources_unavailable(node)):
+ msg = "Chosen ironic node %s is not available" % node_uuid
+ LOG.info(msg, instance=instance)
+ raise exception.ComputeResourcesUnavailable(reason=msg)
+
self._set_instance_id(node, instance)
def failed_spawn_cleanup(self, instance):
@@ -827,6 +839,13 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
@@ -874,15 +893,25 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# nodename is the ironic node's UUID.
node = self._node_from_cache(nodename)
+
reserved = False
- if (not self._node_resources_used(node) and
- self._node_resources_unavailable(node)):
- LOG.debug('Node %(node)s is not ready for a deployment, '
- 'reporting resources as reserved for it. Node\'s '
- 'provision state is %(prov)s, power state is '
- '%(power)s and maintenance is %(maint)s.',
- {'node': node.uuid, 'prov': node.provision_state,
- 'power': node.power_state, 'maint': node.maintenance})
+ if self._node_resources_unavailable(node):
+ # Operators might mark a node as in maintenance,
+ # even when an instance is on the node;
+ # either way let's mark this as reserved.
+ reserved = True
+
+ if (self._node_resources_used(node) and
+ not CONF.workarounds.skip_reserve_in_use_ironic_nodes):
+ # Mark the resources as reserved once we have
+ # an instance here.
+ # When the allocation is deleted, most likely
+ # automatic clean will start, so we keep the node
+ # reserved until it becomes available again.
+ # In the case without automatic clean, once
+ # the allocation is removed in placement it
+ # also stays as reserved until we notice on
+ # the next periodic that it's actually available.
reserved = True
info = self._node_resource(node)
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 3d91c325c3..231283b8dd 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -2047,6 +2047,12 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
self.keymap = None
self.listen = None
+ self.image_compression = None
+ self.jpeg_compression = None
+ self.zlib_compression = None
+ self.playback_compression = None
+ self.streaming_mode = None
+
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
@@ -2057,6 +2063,24 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
if self.listen:
dev.set("listen", self.listen)
+ if self.type == "spice":
+ if self.image_compression is not None:
+ dev.append(etree.Element(
+ 'image', compression=self.image_compression))
+ if self.jpeg_compression is not None:
+ dev.append(etree.Element(
+ 'jpeg', compression=self.jpeg_compression))
+ if self.zlib_compression is not None:
+ dev.append(etree.Element(
+ 'zlib', compression=self.zlib_compression))
+ if self.playback_compression is not None:
+ dev.append(etree.Element(
+ 'playback', compression=self.get_on_off_str(
+ self.playback_compression)))
+ if self.streaming_mode is not None:
+ dev.append(etree.Element(
+ 'streaming', mode=self.streaming_mode))
+
return dev
@@ -3382,6 +3406,7 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
+ self.uuid = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
@@ -3391,6 +3416,8 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
+ if c.tag == "uuid":
+ self.uuid = c.text
class LibvirtConfigNodeDeviceVpdCap(LibvirtConfigObject):
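When SPICE is in use, the new attributes translate into child elements of the <graphics> device. A hedged reconstruction of what format_dom() would emit for one possible set of values, built directly with lxml (the compression values shown are examples of what libvirt accepts, not Nova defaults):

    from lxml import etree

    dev = etree.Element('graphics', type='spice', listen='0.0.0.0')
    dev.append(etree.Element('image', compression='auto_glz'))
    dev.append(etree.Element('jpeg', compression='auto'))
    dev.append(etree.Element('zlib', compression='auto'))
    dev.append(etree.Element('playback', compression='on'))
    dev.append(etree.Element('streaming', mode='filter'))
    print(etree.tostring(dev, pretty_print=True).decode())
    # <graphics type="spice" listen="0.0.0.0">
    #   <image compression="auto_glz"/>
    #   <jpeg compression="auto"/>
    #   <zlib compression="auto"/>
    #   <playback compression="on"/>
    #   <streaming mode="filter"/>
    # </graphics>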
diff --git a/nova/virt/libvirt/cpu/__init__.py b/nova/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/virt/libvirt/cpu/__init__.py
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..1c17458d6b
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,157 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova import objects
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+ It may be a physical CPU core or a hardware thread on a shared CPU core
+ depending on whether the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ def __str__(self):
+ return str(self.ident)
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
+
+
+def power_up(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_up = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = True
+ else:
+ pcpu.set_high_governor()
+ powered_up.add(str(pcpu))
+ LOG.debug("Cores powered up : %s", powered_up)
+
+
+def power_down(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_down = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ powered_down.add(str(pcpu))
+ LOG.debug("Cores powered down : %s", powered_down)
+
+
+def power_down_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if (CONF.libvirt.cpu_power_management and
+ not CONF.compute.cpu_dedicated_set
+ ):
+ msg = _("'[compute]/cpu_dedicated_set' is mandatory to be set if "
+ "'[libvirt]/cpu_power_management' is set."
+ "Please provide the CPUs that can be pinned or don't use the "
+ "power management if you only use shared CPUs.")
+ raise exception.InvalidConfiguration(msg)
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ LOG.debug("Cores powered down : %s", cpu_dedicated_set)
+
+
+def validate_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ governors = set()
+ cpu_states = set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ # we need to collect the governor strategies and the CPU states
+ governors.add(pcpu.governor)
+ cpu_states.add(pcpu.online)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ # all the cores need to have the same governor strategy
+ if len(governors) > 1:
+ msg = _("All the cores need to have the same governor strategy"
+ "before modifying the CPU states. You can reboot the "
+ "compute node if you prefer.")
+ raise exception.InvalidConfiguration(msg)
+ elif CONF.libvirt.cpu_power_management_strategy == 'governor':
+ # all the cores need to be online
+ if False in cpu_states:
+ msg = _("All the cores need to be online before modifying the "
+ "governor strategy.")
+ raise exception.InvalidConfiguration(msg)
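The whole API above is gated on configuration. A minimal nova.conf sketch that would exercise it with the 'cpu_state' strategy (the CPU range is illustrative; the governor options are only consulted with the 'governor' strategy):

    [compute]
    cpu_dedicated_set = 4-15

    [libvirt]
    cpu_power_management = True
    cpu_power_management_strategy = cpu_state
    # Only used with the 'governor' strategy:
    cpu_power_governor_high = performance
    cpu_power_governor_low = powersave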
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+ raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
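core.py is a thin wrapper over sysfs: get_available_cores() parses /sys/devices/system/cpu/present, and the per-core controls live under the path template above. A small hedged illustration of the parsing step (the spec string is an example of what 'present' typically contains):

    from nova.virt import hardware

    # '/sys/devices/system/cpu/present' usually reads like '0-15' or '0-3,8-11';
    # parse_cpu_spec() turns that into the set of core ids.
    print(hardware.parse_cpu_spec('0-3,8-11'))
    # {0, 1, 2, 3, 8, 9, 10, 11}
    # Per-core controls then live at e.g. /sys/devices/system/cpu/cpu5/online
    # and /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor.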
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index ce884dfe30..73134d8391 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -114,6 +114,7 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt.cpu import api as libvirt_cpu
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
@@ -412,6 +413,8 @@ class LibvirtDriver(driver.ComputeDriver):
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
@@ -439,6 +442,10 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
"supports_socket_pci_numa_affinity": True,
+ "supports_ephemeral_encryption":
+ self.image_backend.backend().SUPPORTS_LUKS,
+ "supports_ephemeral_encryption_luks":
+ self.image_backend.backend().SUPPORTS_LUKS,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -463,7 +470,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
@@ -812,6 +818,18 @@ class LibvirtDriver(driver.ComputeDriver):
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
+ # NOTE(sbauza): We verify first if the dedicated CPU performances were
+ # modified by Nova before. Note that it can provide an exception if
+ # either the governor strategies are different between the cores or if
+ # the cores are offline.
+ libvirt_cpu.validate_all_dedicated_cpus()
+ # NOTE(sbauza): We powerdown all dedicated CPUs but if some instances
+ # exist that are pinned for some CPUs, then we'll later powerup those
+ # CPUs when rebooting the instance in _init_instance()
+ # Note that it can provide an exception if the config options are
+ # wrongly modified.
+ libvirt_cpu.power_down_all_dedicated_cpus()
+
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
@@ -984,33 +1002,26 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
- cpu = vconfig.LibvirtConfigGuestCPU()
- for model in models:
- cpu.model = self._get_cpu_model_mapping(model)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured CPU model: %(model)s is not "
- "compatible with host CPU. Please correct your "
- "config and try again. %(e)s") % {
- 'model': model, 'e': e})
- raise exception.InvalidCPUInfo(msg)
-
- # Use guest CPU model to check the compatibility between guest CPU and
- # configured extra_flags
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = self._host.get_capabilities().host.cpu.model
- for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
- cpu_feature = self._prepare_cpu_flag(flag)
- cpu.add_feature(cpu_feature)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured extra flag: %(flag)s it not correct, or "
- "the host CPU does not support this flag. Please "
- "correct the config and try again. %(e)s") % {
- 'flag': flag, 'e': e})
- raise exception.InvalidCPUInfo(msg)
+ if not CONF.workarounds.skip_cpu_compare_at_startup:
+ # Use guest CPU model to check the compatibility between
+ # guest CPU and configured extra_flags
+ for model in models:
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.model = self._get_cpu_model_mapping(model)
+ for flag in set(x.lower() for
+ x in CONF.libvirt.cpu_model_extra_flags):
+ cpu_feature = self._prepare_cpu_flag(flag)
+ cpu.add_feature(cpu_feature)
+ try:
+ self._compare_cpu(cpu, self._get_cpu_info(), None)
+ except exception.InvalidCPUInfo as e:
+ msg = (_("Configured CPU model: %(model)s "
+ "and CPU Flags %(flags)s ar not "
+ "compatible with host CPU. Please correct your "
+ "config and try again. %(e)s") % {
+ 'model': model, 'e': e,
+ 'flags': CONF.libvirt.cpu_model_extra_flags})
+ raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
@@ -1514,6 +1525,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
+ # Now that we're sure the instance is gone, we can power down its cores
+ libvirt_cpu.power_down(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, destroy_secrets=True):
@@ -3166,6 +3179,7 @@ class LibvirtDriver(driver.ComputeDriver):
current_power_state = guest.get_power_state(self._host)
+ libvirt_cpu.power_up(instance)
# TODO(stephenfin): Any reason we couldn't use 'self.resume' here?
guest.launch(pause=current_power_state == power_state.PAUSED)
@@ -3250,7 +3264,13 @@ class LibvirtDriver(driver.ComputeDriver):
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': err_msg})
- raise exception.InternalError(msg)
+
+ if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
+ msg += (", libvirt cannot connect to the qemu-guest-agent"
+ " inside the instance.")
+ raise exception.InstanceQuiesceFailed(reason=msg)
+ else:
+ raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
@@ -7296,6 +7316,11 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.listen = CONF.spice.server_listen
+ graphics.image_compression = CONF.spice.image_compression
+ graphics.jpeg_compression = CONF.spice.jpeg_compression
+ graphics.zlib_compression = CONF.spice.zlib_compression
+ graphics.playback_compression = CONF.spice.playback_compression
+ graphics.streaming_mode = CONF.spice.streaming_mode
guest.add_device(graphics)
add_video_driver = True
@@ -7637,6 +7662,7 @@ class LibvirtDriver(driver.ComputeDriver):
post_xml_callback()
if power_on or pause:
+ libvirt_cpu.power_up(instance)
guest.launch(pause=pause)
return guest
@@ -7741,15 +7767,18 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.compute.cpu_dedicated_set:
return set()
- online_cpus = self._host.get_online_cpus()
+ if CONF.libvirt.cpu_power_management:
+ available_cpus = self._host.get_available_cpus()
+ else:
+ available_cpus = self._host.get_online_cpus()
dedicated_cpus = hardware.get_cpu_dedicated_set()
- if not dedicated_cpus.issubset(online_cpus):
+ if not dedicated_cpus.issubset(available_cpus):
msg = _("Invalid '[compute] cpu_dedicated_set' config: one or "
- "more of the configured CPUs is not online. Online "
- "cpuset(s): %(online)s, configured cpuset(s): %(req)s")
+ "more of the configured CPUs is not available. Available "
+ "cpuset(s): %(available)s, configured cpuset(s): %(req)s")
raise exception.Invalid(msg % {
- 'online': sorted(online_cpus),
+ 'available': sorted(available_cpus),
'req': sorted(dedicated_cpus)})
return dedicated_cpus
@@ -8227,15 +8256,52 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_mediated_device_information(self, devname):
"""Returns a dict of a mediated device."""
- virtdev = self._host.device_lookup_by_name(devname)
+ # LP #1951656 - In Libvirt 7.7, the mdev name now includes the PCI
+ # address of the parent device (e.g. mdev_<uuid>_<pci_address>) due to
+ # the mdevctl allowing for multiple mediated devs having the same UUID
+ # defined (only one can be active at a time). Since the guest
+ # information doesn't have the parent ID, try to look up which
+ # mediated device is available that matches the UUID. If multiple
+ # devices are found that match the UUID, then this is an error
+ # condition.
+ try:
+ virtdev = self._host.device_lookup_by_name(devname)
+ except libvirt.libvirtError as ex:
+ if ex.get_error_code() != libvirt.VIR_ERR_NO_NODE_DEVICE:
+ raise
+ mdevs = [dev for dev in self._host.list_mediated_devices()
+ if dev.startswith(devname)]
+ # If no matching devices are found, simply raise the original
+ # exception indicating that no devices are found.
+ if not mdevs:
+ raise
+ elif len(mdevs) > 1:
+ msg = ("The mediated device name %(devname)s refers to a UUID "
+ "that is present in multiple libvirt mediated devices. "
+ "Matching libvirt mediated devices are %(devices)s. "
+ "Mediated device UUIDs must be unique for Nova." %
+ {'devname': devname,
+ 'devices': ', '.join(mdevs)})
+ raise exception.InvalidLibvirtMdevConfig(reason=msg)
+
+ LOG.debug('Found requested device %s as %s. Using that.',
+ devname, mdevs[0])
+ virtdev = self._host.device_lookup_by_name(mdevs[0])
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
+ # Starting with Libvirt 7.3, the uuid information is available in the
+ # node device information. If it's there, use that. Otherwise,
+ # fall back to the previous behavior of parsing the uuid from the
+ # devname.
+ if cfgdev.mdev_information.uuid:
+ mdev_uuid = cfgdev.mdev_information.uuid
+ else:
+ mdev_uuid = libvirt_utils.mdev_name2uuid(cfgdev.name)
device = {
"dev_id": cfgdev.name,
- # name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4
- "uuid": libvirt_utils.mdev_name2uuid(cfgdev.name),
+ "uuid": mdev_uuid,
# the physical GPU PCI device
"parent": cfgdev.parent,
"type": cfgdev.mdev_information.type,
@@ -8323,6 +8389,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
+ LOG.debug('Searching for available mdevs...')
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
available_mdevs = set()
@@ -8338,6 +8405,7 @@ class LibvirtDriver(driver.ComputeDriver):
available_mdevs.add(mdev["uuid"])
available_mdevs -= set(allocated_mdevs)
+ LOG.info('Available mdevs at: %s.', available_mdevs)
return available_mdevs
def _create_new_mediated_device(self, parent, uuid=None):
@@ -8349,6 +8417,7 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: the newly created mdev UUID or None if not possible
"""
+ LOG.debug('Attempting to create new mdev...')
supported_types = self.supported_vgpu_types
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(supported_types)
@@ -8360,6 +8429,7 @@ class LibvirtDriver(driver.ComputeDriver):
# The device is not the one that was called, not creating
# the mdev
continue
+ LOG.debug('Trying on: %s.', dev_name)
dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name)
if dev_supported_type and device['types'][
dev_supported_type]['availableInstances'] > 0:
@@ -8369,7 +8439,13 @@ class LibvirtDriver(driver.ComputeDriver):
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
chosen_mdev = nova.privsep.libvirt.create_mdev(
pci_addr, dev_supported_type, uuid=uuid)
+ LOG.info('Created mdev: %s on pGPU: %s.',
+ chosen_mdev, pci_addr)
return chosen_mdev
+ LOG.debug('Failed: No available instances on device.')
+ LOG.info('Failed to create mdev. '
+ 'No free space found among the following devices: %s.',
+ [dev['dev_id'] for dev in devices])
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
@@ -8452,6 +8528,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
+ LOG.debug('No available mdevs were found. '
+ 'Creating a new one...')
chosen_mdev = self._create_new_mediated_device(parent_device)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
@@ -8459,6 +8537,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason='mdev-capable resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
+ LOG.info('Allocated mdev: %s.', chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
@@ -9461,6 +9540,7 @@ class LibvirtDriver(driver.ComputeDriver):
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
+ data["uuid"] = self._host.get_node_uuid()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
@@ -9914,7 +9994,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
- ret = self._host.compare_cpu(cpu_xml)
+ ret = self._host.compare_hypervisor_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
@@ -10997,16 +11077,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.workarounds.enable_qemu_monitor_announce_self:
return
- LOG.info('Sending announce-self command to QEMU monitor',
- instance=instance)
+ current_attempt = 0
- try:
- guest = self._host.get_guest(instance)
- guest.announce_self()
- except Exception:
- LOG.warning('Failed to send announce-self command to QEMU monitor',
- instance=instance)
- LOG.exception()
+ max_attempts = (
+ CONF.workarounds.qemu_monitor_announce_self_count)
+ # qemu_monitor_announce_self_interval is specified in seconds
+ announce_pause = (
+ CONF.workarounds.qemu_monitor_announce_self_interval)
+
+ while current_attempt < max_attempts:
+ # Increment attempt
+ current_attempt += 1
+
+ # Only use announce_pause after the first attempt to avoid
+ # pausing before calling announce_self for the first attempt
+ if current_attempt != 1:
+ greenthread.sleep(announce_pause)
+
+ LOG.info('Sending announce-self command to QEMU monitor. '
+ 'Attempt %(current_attempt)s of %(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ try:
+ guest = self._host.get_guest(instance)
+ guest.announce_self()
+ except Exception:
+ LOG.warning('Failed to send announce-self command to '
+ 'QEMU monitor. Attempt %(current_attempt)s of '
+ '%(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ LOG.exception('announce-self command failed', instance=instance)
def post_live_migration_at_destination(self, context,
instance,
@@ -11256,6 +11357,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
+
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 785acdcd18..9658a5791d 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -66,6 +66,7 @@ from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
+import nova.virt.node # noqa
if ty.TYPE_CHECKING:
import libvirt
@@ -138,6 +139,7 @@ class Host(object):
self._caps = None
self._domain_caps = None
self._hostname = None
+ self._node_uuid = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
@@ -738,6 +740,14 @@ class Host(object):
return doms
+ def get_available_cpus(self):
+ """Get the set of CPUs that exist on the host.
+
+ :returns: set of CPUs, raises libvirtError on error
+ """
+ cpus, cpu_map, online = self.get_connection().getCPUMap()
+ return {cpu for cpu in range(cpus)}
+
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
@@ -1059,6 +1069,12 @@ class Host(object):
{'old': self._hostname, 'new': hostname})
return self._hostname
+ def get_node_uuid(self):
+ """Returns the UUID of this node."""
+ if not self._node_uuid:
+ self._node_uuid = nova.virt.node.get_local_node_uuid()
+ return self._node_uuid
+
def find_secret(self, usage_type, usage_id):
"""Find a secret.
@@ -1566,7 +1582,7 @@ class Host(object):
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
- :returns: a list of virNodeDevice instance
+ :returns: a list of strings with the names of the mediated devices
"""
return self._list_devices("mdev", flags=flags)
@@ -1605,6 +1621,22 @@ class Host(object):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
+ def compare_hypervisor_cpu(self, xmlDesc, flags=0):
+ """Compares the given CPU description with the CPU provided by
+ the host hypervisor. This is different from the older method,
+ compare_cpu(), which compares a given CPU definition with the
+ host CPU without considering the abilities of the host
+ hypervisor. Except for @xmlDesc, all other parameters to the
+ compareHypervisorCPU API are optional (libvirt will choose
+ sensible defaults).
+ """
+ emulator = None
+ arch = None
+ machine = None
+ virttype = None
+ return self.get_connection().compareHypervisorCPU(
+ emulator, arch, machine, virttype, xmlDesc, flags)
+
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 534cc60759..0a64ef43dd 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -81,6 +81,7 @@ def _update_utime_ignore_eacces(path):
class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
+ SUPPORTS_LUKS = False
def __init__(
self,
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index c673818603..adb2ec45a1 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,6 +22,7 @@ import grp
import os
import pwd
import re
+import tempfile
import typing as ty
import uuid
@@ -114,6 +115,7 @@ def create_image(
disk_format: str,
disk_size: ty.Optional[ty.Union[str, int]],
backing_file: ty.Optional[str] = None,
+ encryption: ty.Optional[ty.Dict[str, ty.Any]] = None
) -> None:
"""Disk image creation with qemu-img
:param path: Desired location of the disk image
@@ -125,15 +127,16 @@ def create_image(
If no suffix is given, it will be interpreted as bytes.
Can be None in the case of a COW image.
:param backing_file: (Optional) Backing file to use.
+ :param encryption: (Optional) Dict detailing various encryption attributes
+ such as the format and passphrase.
"""
- base_cmd = [
+ cmd = [
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
]
- cow_opts = []
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += [
+ cow_opts = [
f'backing_file={backing_file}',
f'backing_fmt={base_details.file_format}'
]
@@ -147,12 +150,60 @@ def create_image(
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
- cow_opts = ['-o', csv_opts]
-
- cmd = base_cmd + cow_opts + [path]
- if disk_size is not None:
- cmd += [str(disk_size)]
- processutils.execute(*cmd)
+ cmd += ['-o', csv_opts]
+
+ # Disk size can be None in the case of a COW image
+ disk_size_arg = [str(disk_size)] if disk_size is not None else []
+
+ if encryption:
+ with tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8') as f:
+ # Write out the passphrase secret to a temp file
+ f.write(encryption.get('secret'))
+
+ # Ensure the secret is written to disk, we can't .close() here as
+ # that removes the file when using NamedTemporaryFile
+ f.flush()
+
+ # The basic options include the secret and encryption format
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={f.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+ # Supported luks options:
+ # cipher-alg=<str> - Name of cipher algorithm and key length
+ # cipher-mode=<str> - Name of encryption cipher mode
+ # hash-alg=<str> - Name of hash algorithm to use for PBKDF
+ # iter-time=<num> - Time to spend in PBKDF in milliseconds
+ # ivgen-alg=<str> - Name of IV generator algorithm
+ # ivgen-hash-alg=<str> - Name of IV generator hash algorithm
+ #
+ # NOTE(melwitt): Sensible defaults (that match the qemu defaults)
+ # are hardcoded at this time for simplicity and consistency when
+ # instances are migrated. Configuration of luks options could be
+ # added in a future release.
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ # We need to execute the command while the NamedTemporaryFile still
+ # exists
+ cmd += encryption_opts + [path] + disk_size_arg
+ processutils.execute(*cmd)
+ else:
+ cmd += [path] + disk_size_arg
+ processutils.execute(*cmd)
def create_ploop_image(
@@ -575,17 +626,31 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
def mdev_name2uuid(mdev_name: str) -> str:
- """Convert an mdev name (of the form mdev_<uuid_with_underscores>) to a
- uuid (of the form 8-4-4-4-12).
+ """Convert an mdev name (of the form mdev_<uuid_with_underscores> or
+ mdev_<uuid_with_underscores>_<pciaddress>) to a uuid
+ (of the form 8-4-4-4-12).
+
+ :param mdev_name: the name of the mdev to parse the UUID from
+ :returns: string containing the uuid
"""
- return str(uuid.UUID(mdev_name[5:].replace('_', '-')))
+ mdev_uuid = mdev_name[5:].replace('_', '-')
+ # Unconditionally remove the PCI address from the name
+ mdev_uuid = mdev_uuid[:36]
+ return str(uuid.UUID(mdev_uuid))
+
+def mdev_uuid2name(mdev_uuid: str, parent: str = None) -> str:
+ """Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
+ device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
-def mdev_uuid2name(mdev_uuid: str) -> str:
- """Convert an mdev uuid (of the form 8-4-4-4-12) to a name (of the form
- mdev_<uuid_with_underscores>).
+ :param mdev_uuid: the uuid of the mediated device
+ :param parent: the parent device id for the mediated device
+ :returns: name of the mdev to reference in libvirt
"""
- return "mdev_" + mdev_uuid.replace('-', '_')
+ name = "mdev_" + mdev_uuid.replace('-', '_')
+ if parent and parent.startswith('pci_'):
+ name = name + parent[4:]
+ return name
def get_flags_by_flavor_specs(flavor: 'objects.Flavor') -> ty.Set[str]:
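For an encrypted image the new branch above builds the qemu-img command around a temporary passphrase file. A hedged usage sketch and a rough reconstruction of the resulting command line (path, size and secret are invented for illustration):

    from nova.virt.libvirt import utils as libvirt_utils

    libvirt_utils.create_image(
        path='/var/lib/nova/instances/example/disk.eph0',
        disk_format='qcow2',
        disk_size='1G',
        encryption={'format': 'luks', 'secret': 'passphrase-from-key-manager'},
    )
    # Roughly equivalent to:
    #   env LC_ALL=C LANG=C qemu-img create -f qcow2 \
    #     --object secret,id=sec,file=<tempfile> \
    #     -o encrypt.key-secret=sec -o encrypt.format=luks \
    #     -o encrypt.cipher-alg=aes-256 -o encrypt.cipher-mode=xts \
    #     -o encrypt.hash-alg=sha256 -o encrypt.iter-time=2000 \
    #     -o encrypt.ivgen-alg=plain64 -o encrypt.ivgen-hash-alg=sha256 \
    #     /var/lib/nova/instances/example/disk.eph0 1G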
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index b50db3aa1c..22c65e99c0 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -79,7 +79,6 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Extend the volume."""
LOG.debug("calling os-brick to extend FC Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
- LOG.debug("Extend FC Volume %s; new_size=%s",
- connection_info['data']['device_path'],
+ LOG.debug("Extend FC Volume: new_size=%s",
new_size, instance=instance)
return new_size
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 6ea91e2221..0ab3ddc4c1 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -263,12 +263,19 @@ def _get_eth_link(vif, ifc_num):
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
- 'mtu': vif['network']['meta'].get('mtu'),
+ 'mtu': _get_link_mtu(vif),
'ethernet_mac_address': vif.get('address'),
}
return link
+def _get_link_mtu(vif):
+ for subnet in vif['network']['subnets']:
+ if subnet['meta'].get('dhcp_server'):
+ return None
+ return vif['network']['meta'].get('mtu')
+
+
def _get_nets(vif, subnet, version, net_num, link_id):
"""Get networks for the given VIF and subnet
diff --git a/nova/virt/node.py b/nova/virt/node.py
new file mode 100644
index 0000000000..4cb3d0a573
--- /dev/null
+++ b/nova/virt/node.py
@@ -0,0 +1,108 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import uuid
+
+from oslo_utils import uuidutils
+
+import nova.conf
+from nova import exception
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+COMPUTE_ID_FILE = 'compute_id'
+LOCAL_NODE_UUID = None
+
+
+def write_local_node_uuid(node_uuid):
+ # We only ever write an identity file in the CONF.state_path
+ # location
+ fn = os.path.join(CONF.state_path, COMPUTE_ID_FILE)
+
+ # Try to create the identity file and write our uuid into it. Fail
+ # if the file exists (since it shouldn't if we made it here).
+ try:
+ with open(fn, 'x') as f:
+ f.write(node_uuid)
+ except FileExistsError:
+ # If the file exists, we must either fail or re-survey all the
+ # potential files. If we just read and return it, it could be
+ # inconsistent with files in the other locations.
+ raise exception.InvalidNodeConfiguration(
+ reason='Identity file %s appeared unexpectedly' % fn)
+ except Exception as e:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to write uuid to %s: %s' % (fn, e))
+
+ LOG.info('Wrote node identity %s to %s', node_uuid, fn)
+
+
+def read_local_node_uuid():
+ locations = ([os.path.dirname(f) for f in CONF.config_file] +
+ [CONF.state_path])
+
+ uuids = []
+ found = []
+ for location in locations:
+ fn = os.path.join(location, COMPUTE_ID_FILE)
+ try:
+ # UUIDs should be 36 characters in canonical format. Read
+ # a little more to be graceful about whitespace in/around
+ # the actual value we want to read. However, it must parse
+ # to a legit UUID once we strip the whitespace.
+ with open(fn) as f:
+ content = f.read(40)
+ node_uuid = str(uuid.UUID(content.strip()))
+ except FileNotFoundError:
+ continue
+ except ValueError:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to parse UUID from %s' % fn)
+ uuids.append(node_uuid)
+ found.append(fn)
+
+ if uuids:
+ # Any identities we found must be consistent, or we fail
+ first = uuids[0]
+ for i, (node_uuid, fn) in enumerate(zip(uuids, found)):
+ if node_uuid != first:
+ raise exception.InvalidNodeConfiguration(
+ reason='UUID %s in %s does not match %s' % (
+ node_uuid, fn, uuids[i - 1]))
+ LOG.info('Determined node identity %s from %s', first, found[0])
+ return first
+ else:
+ return None
+
+
+def get_local_node_uuid():
+ """Read or create local node uuid file.
+
+ :returns: UUID string read from file, or generated
+ """
+ global LOCAL_NODE_UUID
+
+ if LOCAL_NODE_UUID is not None:
+ return LOCAL_NODE_UUID
+
+ node_uuid = read_local_node_uuid()
+ if not node_uuid:
+ node_uuid = uuidutils.generate_uuid()
+ LOG.info('Generated node identity %s', node_uuid)
+ write_local_node_uuid(node_uuid)
+
+ LOCAL_NODE_UUID = node_uuid
+ return node_uuid
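A short hedged usage sketch of the module above: the first call either reads an existing compute_id file or generates a UUID and persists it under CONF.state_path, and later calls return the module-level cache:

    from nova.virt import node

    uuid1 = node.get_local_node_uuid()   # read or generate + write compute_id
    uuid2 = node.get_local_node_uuid()   # served from the LOCAL_NODE_UUID cache
    assert uuid1 == uuid2 == node.LOCAL_NODE_UUID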
diff --git a/playbooks/ceph/glance-copy-policy.yaml b/playbooks/ceph/glance-copy-policy.yaml
deleted file mode 100644
index 41654a103d..0000000000
--- a/playbooks/ceph/glance-copy-policy.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-- hosts: controller
- tasks:
- - name: create local.sh
- become: yes
- blockinfile:
- path: /opt/stack/devstack/local.sh
- create: True
- mode: 0777
- block: |
- # This policy is default to admin only in glance. Override
- # here to allow everyone and every type of image (private
- # or public) to copy. This way we will be able to test copy
- # image via non-admin as well as on private images.
- echo $'"copy_image": ""' >> /etc/glance/policy.yaml
- sudo systemctl restart 'devstack@g-*'
diff --git a/playbooks/ceph/glance-setup.yaml b/playbooks/ceph/glance-setup.yaml
new file mode 100644
index 0000000000..5792c72237
--- /dev/null
+++ b/playbooks/ceph/glance-setup.yaml
@@ -0,0 +1,39 @@
+- hosts: controller
+ tasks:
+ - name: create local.sh
+ become: yes
+ blockinfile:
+ path: /opt/stack/devstack/local.sh
+ create: True
+ mode: 0777
+ block: |
+ # Delete all existing images
+ source /opt/stack/devstack/openrc admin
+ for img in $(openstack image list -f value -c ID); do
+ openstack image show $img
+ echo Deleting $img
+ openstack image delete $img
+ done
+
+ # Inflate our cirros image to 1G raw
+ arch=$(uname -m)
+ image=$(ls /opt/stack/devstack/files/cirros*${arch}-disk.img | tail -n1)
+ rawimage="/opt/stack/devstack/files/cirros-raw.img"
+ qemu-img convert -O raw "$image" "$rawimage"
+ truncate --size $((950 << 20)) "$rawimage"
+
+ # Upload it to glance as the sole image available so tempest
+ # config will find it. Wait ten seconds after doing this
+ # before the restart below.
+ openstack image create --container-format bare --disk-format raw --public "cirros-raw" < "$rawimage"
+ sleep 10
+ openstack image list
+ openstack image show cirros-raw
+
+ # This policy is default to admin only in glance. Override
+ # here to allow everyone and every type of image (private
+ # or public) to copy. This way we will be able to test copy
+ # image via non-admin as well as on private images.
+ echo $'"copy_image": ""' >> /etc/glance/policy.yaml
+ sudo systemctl restart 'devstack@g-*'
+
diff --git a/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml b/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml
new file mode 100644
index 0000000000..b5232f5ea2
--- /dev/null
+++ b/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - |
+ For networks which have any subnets with DHCP enabled, the MTU value is not
+ sent in the metadata. In such cases the MTU is configured through the DHCP
+ server.
diff --git a/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
new file mode 100644
index 0000000000..b370889171
--- /dev/null
+++ b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ The following SPICE-related options are added to the ``spice``
+ configuration group of a Nova configuration:
+
+ - ``image_compression``
+ - ``jpeg_compression``
+ - ``zlib_compression``
+ - ``playback_compression``
+ - ``streaming_mode``
+
+ These configuration options can be used to enable and set the
+ SPICE compression settings for libvirt (QEMU/KVM) provisioned
+ instances. Each option is optional and can be set explicitly to
+ configure the associated SPICE compression setting for libvirt.
+ If none of these options is set, none of the SPICE compression
+ settings will be configured for libvirt, which corresponds to the
+ behavior before this change. In this case, the built-in defaults
+ from the libvirt backend (e.g. QEMU) are used.
+
+ Note that those options are only taken into account if SPICE support
+ is enabled (and VNC support is disabled).
diff --git a/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
new file mode 100644
index 0000000000..6c5bc98046
--- /dev/null
+++ b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Starting with v2.95, any evacuated instance will be stopped at the
+ destination host. The required minimum version for Nova computes is
+ 27.0.0 (antelope 2023.1). Operators can continue using the
+ previous behavior by selecting a microversion below v2.95.
+upgrade:
+ - |
+ Operators will need to upgrade compute hosts to Nova
+ 27.0.0 (antelope 2023.1) in order to take advantage of the new
+ (microversion v2.95) evacuate API behavior. An exception will be
+ raised for older compute versions.
diff --git a/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
new file mode 100644
index 0000000000..66890684af
--- /dev/null
+++ b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
@@ -0,0 +1,51 @@
+---
+prelude: |
+ The OpenStack 2023.1 (Nova 27.0.0) release includes many new features and
+ bug fixes. Please be sure to read the upgrade section which describes the
+ required actions to upgrade your cloud from 26.0.0 (Zed) to 27.0.0 (2023.1).
+ As a reminder, OpenStack 2023.1 is our first `Skip-Level-Upgrade Release`__
+ (from now on called a `SLURP release`) where you can perform a rolling
+ upgrade of your compute services from OpenStack Yoga as an experimental
+ feature. The next SLURP release will be 2024.1.
+
+ .. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for 2023.1 is `v2.95`__.
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-2023.1
+
+ - `PCI devices can now be scheduled <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ by Nova using the Placement API on an opt-in basis. This will help the
+ nova-scheduler service to better schedule flavors that use PCI
+ (non-Neutron related) resources, will generate fewer reschedules if an
+ instance cannot be created on a candidate, and will help the nova-scheduler
+ not to miss valid candidates if the list is too large.
+
+ - Operators can now ask Nova to `manage the power consumption of dedicated
+ CPUs <https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#configuring-cpu-power-management-for-dedicated-cores>`_
+ so as to either offline them or change their governor if they're
+ currently not in use by any instance or if the instance is stopped.
+
+ - Nova will prevent unexpected compute service renames by `persisting a unique
+ compute UUID on local disk <https://docs.openstack.org/nova/latest/admin/compute-node-identification.html>`_.
+ This stored UUID will be considered the source of truth for knowing whether
+ the compute service hostname has been modified or not. As a reminder,
+ changing a compute hostname is forbidden, particularly when the compute
+ service is currently running instances.
+
+ - `SPICE consoles <https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console>`_
+ can now be configured with compression settings which include choices of the
+ compression algorithm and the compression mode.
+
+ - Fully-Qualified Domain Names are now considered valid for an instance
+ hostname if you use the 2.94 API microversion.
+
+ - By opting into 2.95 API microversion, evacuated instances will remain
+ stopped on the destination host until manually started.
+
+ - Nova APIs now `support new RBAC policies <https://docs.openstack.org/nova/latest/configuration/policy.html>`_
+ and scopes by default. See our `Policy Concepts documentation <https://docs.openstack.org/nova/latest/configuration/policy-concepts.html>`_
+ for further details.
diff --git a/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
new file mode 100644
index 0000000000..95422fce67
--- /dev/null
+++ b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ It is now possible to configure nova-compute services using the libvirt
+ driver by setting ``[libvirt]cpu_power_management`` to ``True`` in order
+ to let the service power physical CPUs down or up depending on whether
+ those CPUs are pinned to instances. In order to support this feature, the
+ compute service needs to be configured with ``[compute]cpu_dedicated_set``.
+ If so, all the related CPUs will be powered down until they are used by an
+ instance, with the related pinned CPUs powered up just before starting the
+ guest. If ``[compute]cpu_dedicated_set`` isn't set, then the compute
+ service will refuse to start.
+
+ By default the power strategy offlines CPUs when powering down and onlines
+ them when powering up, but another strategy is possible by using
+ ``[libvirt]cpu_power_management_strategy=governor``, which will instead
+ modify the related CPU governor using the ``[libvirt]cpu_power_governor_low``
+ and ``[libvirt]cpu_power_governor_high`` configuration values (respective
+ defaults being ``powersave`` and ``performance``).
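+
+ As a minimal sketch (the CPU range below is an arbitrary example; use
+ the dedicated CPU list that matches your host topology), the feature
+ could be enabled with::
+
+   [compute]
+   # illustrative range; must match the host's dedicated (pinned) CPUs
+   cpu_dedicated_set = 4-15
+
+   [libvirt]
+   cpu_power_management = True
+   # optional: switch from the default offline/online strategy
+   cpu_power_management_strategy = governor
+   cpu_power_governor_low = powersave
+   cpu_power_governor_high = performance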
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
new file mode 100644
index 0000000000..7a9e53ed26
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Since 26.0.0 (Zed) Nova supports tracking PCI devices in Placement. Now
+ Nova also supports scheduling flavor-based PCI device requests via
+ Placement. This support is disabled by default. Please read the
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
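+
+ As a hedged sketch (verify the exact option names and prerequisites in
+ the linked documentation before enabling this in production), opting in
+ typically involves configuration along these lines::
+
+   [pci]
+   # enables PCI inventory reporting to Placement (available since Zed)
+   report_in_placement = True
+
+   [filter_scheduler]
+   # enables scheduling of flavor-based PCI requests via Placement
+   pci_in_placement = True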
diff --git a/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
new file mode 100644
index 0000000000..0941dd7450
--- /dev/null
+++ b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
@@ -0,0 +1,28 @@
+---
+fixes:
+ - |
+ Fixes `bug 1996995`_ in which VMs live migrated on certain VXLAN Arista
+ network fabrics were inaccessible until the switch ARP cache expired.
+
+ A Nova workaround option, ``enable_qemu_monitor_announce_self``, was added
+ to fix `bug 1815989`_; when enabled, it interacts with the QEMU monitor
+ and forces a VM to announce itself.
+
+ On certain network fabrics, VMs that were live migrated remained
+ inaccessible via the network despite the QEMU monitor ``announce_self``
+ command being called successfully.
+
+ Testing on Arista VXLAN fabrics showed that several runs of the QEMU
+ ``announce_self`` monitor command were required before the switch would
+ acknowledge a VM's new location on the fabric.
+
+ This fix introduces two operator-configurable options.
+ ``qemu_announce_self_count`` sets the number of times the QEMU monitor
+ ``announce_self`` command is called.
+
+ ``qemu_announce_self_interval`` sets the delay in seconds between
+ subsequent ``announce_self`` commands.
+
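+ As an illustrative sketch (assuming, like
+ ``enable_qemu_monitor_announce_self`` itself, that these options live
+ in the ``[workarounds]`` group; the values are arbitrary examples), an
+ affected deployment might set::
+
+   [workarounds]
+   enable_qemu_monitor_announce_self = True
+   # assumed group placement; count/interval values are examples only
+   qemu_announce_self_count = 3
+   qemu_announce_self_interval = 1
+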
+ .. _`bug 1996995`: https://bugs.launchpad.net/nova/+bug/1996995
+ .. _`bug 1815989`: https://bugs.launchpad.net/nova/+bug/1815989
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
new file mode 100644
index 0000000000..72a6f861b6
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The Nova service now enables the new API policy (RBAC) defaults and scope
+ checking by default. The default values of the config options
+ ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults``
+ have been changed to ``True``.
+
+ This means that if you are using a system-scoped token to access the Nova
+ API, the request will fail with a 403 error code. Also, the new defaults
+ will now be enforced. To learn about the new defaults of each policy
+ rule, refer to the `Policy New Defaults`_. For more detail about the Nova
+ API policy changes, refer to `Policy Concepts`_.
+
+ If you want to disable them, modify the below config option values in the
+ ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
new file mode 100644
index 0000000000..4fd2cc1ca9
--- /dev/null
+++ b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Fixed an issue where placement could return ironic nodes that had just
+ started automatic cleaning as possible valid candidates. This is done by
+ marking all ironic nodes with an instance on them as reserved, such that
+ nova only makes them available once we have double-checked that Ironic
+ reports the node as available. If you don't have automatic cleaning on,
+ this might mean it takes longer than normal for Ironic nodes to become
+ available for new instances. If you want the old behaviour, use the
+ following workaround config: ``[workarounds]skip_reserve_in_use_ironic_nodes=true``
diff --git a/releasenotes/notes/microversion-2-94-59649401d5763286.yaml b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
new file mode 100644
index 0000000000..d0927e6f75
--- /dev/null
+++ b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - |
+ The 2.94 microversion has been added. This microversion extends
+ microversion 2.90 by allowing Fully Qualified Domain Names (FQDN) wherever
+ the ``hostname`` can be specified. This applies to creating an
+ instance (``POST /servers``), updating an instance
+ (``PUT /servers/{id}``), or rebuilding an instance
+ (``POST /servers/{server_id}/action (rebuild)``). When using an FQDN as the
+ instance hostname, the ``[api]dhcp_domain`` configuration option must be
+ set to the empty string in order for the correct FQDN to appear in the
+ ``hostname`` field in the metadata API.
+
+upgrade:
+ - |
+ In order to make use of microversion 2.94's FQDN hostnames, the
+ ``[api]dhcp_domain`` config option must be set to the empty string. If
+ this is not done, the ``hostname`` field in the metadata API will be
+ incorrect, as it will include the value of ``[api]dhcp_domain`` appended to
+ the instance's FQDN. Note that simply not setting ``[api]dhcp_domain`` is
+ not enough, as it has a default value of ``novalocal``. It must explicitly
+ be set to the empty string.
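+
+ As a minimal sketch, this means explicitly clearing the option in
+ ``nova.conf`` (an empty value, not an absent one)::
+
+   [api]
+   # explicitly empty, overriding the default value 'novalocal'
+   dhcp_domain =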
diff --git a/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml b/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml
new file mode 100644
index 0000000000..f4361477de
--- /dev/null
+++ b/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ Apache mod_wsgi does not support passing command-line arguments to the WSGI
+ application that it hosts. As a result, when the nova API or metadata API
+ was run under mod_wsgi it was not possible to use multiple config files
+ or non-default file names, e.g. ``nova-api.conf``.
+ This has been addressed by the introduction of a new, optional, environment
+ variable ``OS_NOVA_CONFIG_FILES``. ``OS_NOVA_CONFIG_FILES`` is a ``;``
+ separated list of file paths relative to ``OS_NOVA_CONFIG_DIR``.
+ When unset, the defaults ``api-paste.ini`` and ``nova.conf`` will be used
+ from ``/etc/nova``. This is supported for the nova API and nova metadata
+ WSGI applications.
+
diff --git a/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
new file mode 100644
index 0000000000..7e80059b80
--- /dev/null
+++ b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix rescuing volume based instance by adding a check for 'hw_rescue_disk'
+ and 'hw_rescue_device' properties in image metadata before attempting
+ to rescue instance.
diff --git a/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
new file mode 100644
index 0000000000..fdeb593bd2
--- /dev/null
+++ b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ The compute manager now uses a local file to provide node uuid persistence
+ to guard against problems with renamed services, among other things.
+ Deployers wishing to ensure that *new* compute services get a predictable
+ uuid before initial startup may provision that file and nova will use it;
+ otherwise nova will generate and write one to a ``compute_id`` file in
+ ``CONF.state_path`` the first time it starts up. Accidental renames of a
+ compute node's hostname will be detected and the manager will exit to avoid
+ database corruption. Note that none of this applies to Ironic computes, as
+ they manage nodes and uuids differently.
+upgrade:
+ - |
+ Existing compute nodes will, upon upgrade, persist the uuid of the compute
+ node assigned to their hostname at first startup. Since this must match
+ what is currently in the database, it is important to let nova provision
+ this file from its database. Nova will only persist to a ``compute_id`` file
+ in the ``CONF.state_path`` directory, which should already be writable.
diff --git a/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
new file mode 100644
index 0000000000..924e09a602
--- /dev/null
+++ b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Nova's use of libvirt's compareCPU() API has become error-prone, as
+ it doesn't take into account the host hypervisor's capabilities. With
+ QEMU >= 2.9 and libvirt >= 4.4.0, libvirt will do the right thing in
+ terms of CPU comparison checks via a new replacement API,
+ compareHypervisorCPU(). Nova exceeds these minimum version
+ requirements for QEMU and libvirt by a good margin.
+
+ This change replaces the usage of the older API, compareCPU(), with the
+ new one, compareHypervisorCPU().
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 0000000000..d1238479ba
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 6bff00e25a..ed6f8c2d07 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@ Nova Release Notes
:maxdepth: 1
unreleased
+ 2023.1
zed
yoga
xena
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index d90391af7c..c0bd8bc9a8 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -2,15 +2,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
# Andi Chandler <andi@gowling.com>, 2022. #zanata
+# Andi Chandler <andi@gowling.com>, 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-09-16 12:59+0000\n"
+"POT-Creation-Date: 2023-03-06 19:02+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-09-15 09:04+0000\n"
+"PO-Revision-Date: 2023-01-26 10:17+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -382,9 +383,6 @@ msgstr "20.5.0"
msgid "20.6.1"
msgstr "20.6.1"
-msgid "20.6.1-29"
-msgstr "20.6.1-29"
-
msgid "204 NoContent on success"
msgstr "204 NoContent on success"
@@ -409,9 +407,6 @@ msgstr "21.2.2"
msgid "21.2.3"
msgstr "21.2.3"
-msgid "21.2.4-12"
-msgstr "21.2.4-12"
-
msgid "22.0.0"
msgstr "22.0.0"
@@ -433,9 +428,6 @@ msgstr "22.3.0"
msgid "22.4.0"
msgstr "22.4.0"
-msgid "22.4.0-6"
-msgstr "22.4.0-6"
-
msgid "23.0.0"
msgstr "23.0.0"
@@ -451,8 +443,8 @@ msgstr "23.2.0"
msgid "23.2.1"
msgstr "23.2.1"
-msgid "23.2.1-13"
-msgstr "23.2.1-13"
+msgid "23.2.2"
+msgstr "23.2.2"
msgid "24.0.0"
msgstr "24.0.0"
@@ -463,8 +455,8 @@ msgstr "24.1.0"
msgid "24.1.1"
msgstr "24.1.1"
-msgid "24.1.1-7"
-msgstr "24.1.1-7"
+msgid "24.2.0"
+msgstr "24.2.0"
msgid "25.0.0"
msgstr "25.0.0"
@@ -472,8 +464,14 @@ msgstr "25.0.0"
msgid "25.0.1"
msgstr "25.0.1"
-msgid "25.0.1-5"
-msgstr "25.0.1-5"
+msgid "25.1.0"
+msgstr "25.1.0"
+
+msgid "26.0.0"
+msgstr "26.0.0"
+
+msgid "26.1.0"
+msgstr "26.1.0"
msgid "400 for unknown param for query param and for request body."
msgstr "400 for unknown param for query param and for request body."
@@ -488,6 +486,24 @@ msgid "409 Conflict if inventory in use or if some other request concurrently"
msgstr "409 Conflict if inventory in use or if some other request concurrently"
msgid ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+msgstr ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+
+msgid ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+msgstr ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+
+msgid ""
"A ``default_floating_pool`` configuration option has been added in the "
"``[neutron]`` group. The existing ``default_floating_pool`` option in the "
"``[DEFAULT]`` group is retained and should be used by nova-network users. "
@@ -571,6 +587,24 @@ msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
msgid ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+msgstr ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+
+msgid ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+msgstr ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+
+msgid ""
"The ``nova-manage vm list`` command is deprecated and will be removed in the "
"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
"instead."
@@ -580,6 +614,24 @@ msgstr ""
"instead."
msgid ""
+"The default flavors that nova has previously had are no longer created as "
+"part of the first database migration. New deployments will need to create "
+"appropriate flavors before first use."
+msgstr ""
+"The default flavours that Nova previously had are no longer created as part "
+"of the first database migration. New deployments will need to create "
+"appropriate flavours before first use."
+
+msgid ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+msgstr ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+
+msgid ""
"These commands only work with nova-network which is itself deprecated in "
"favor of Neutron."
msgstr ""
@@ -611,6 +663,9 @@ msgstr "Xena Series Release Notes"
msgid "Yoga Series Release Notes"
msgstr "Yoga Series Release Notes"
+msgid "Zed Series Release Notes"
+msgstr "Zed Series Release Notes"
+
msgid "kernels 3.x: 8"
msgstr "kernels 3.x: 8"
diff --git a/requirements.txt b/requirements.txt
index c38ade020d..9954d06bc9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -42,8 +42,8 @@ oslo.upgradecheck>=1.3.0
oslo.utils>=4.12.1 # Apache-2.0
oslo.db>=10.0.0 # Apache-2.0
oslo.rootwrap>=5.15.0 # Apache-2.0
-oslo.messaging>=10.3.0 # Apache-2.0
-oslo.policy>=3.7.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
+oslo.policy>=3.11.0 # Apache-2.0
oslo.privsep>=2.6.2 # Apache-2.0
oslo.i18n>=5.1.0 # Apache-2.0
oslo.service>=2.8.0 # Apache-2.0
@@ -54,7 +54,7 @@ oslo.versionedobjects>=1.35.0 # Apache-2.0
os-brick>=5.2 # Apache-2.0
os-resource-classes>=1.1.0 # Apache-2.0
os-traits>=2.9.0 # Apache-2.0
-os-vif>=1.15.2 # Apache-2.0
+os-vif>=3.1.0 # Apache-2.0
castellan>=0.16.0 # Apache-2.0
microversion-parse>=0.2.1 # Apache-2.0
tooz>=1.58.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 58594e229c..fa6f6af656 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,7 @@ classifiers =
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
diff --git a/tox.ini b/tox.ini
index edb08599e7..097edbe827 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,13 +1,8 @@
[tox]
minversion = 3.18.0
-envlist = py39,functional,pep8
-# Automatic envs (pyXX) will only use the python version appropriate to that
-# env and ignore basepython inherited from [testenv] if we set
-# ignore_basepython_conflict.
-ignore_basepython_conflict = True
+envlist = py3,functional,pep8
[testenv]
-basepython = python3
usedevelop = True
allowlist_externals =
bash
@@ -15,6 +10,7 @@ allowlist_externals =
rm
env
make
+install_command = python -I -m pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
LANGUAGE=en_US
@@ -26,8 +22,6 @@ setenv =
# TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
SQLALCHEMY_WARN_20=1
deps =
- -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
extras =
zvm
@@ -67,7 +61,7 @@ description =
# because we do not want placement present during unit tests.
deps =
{[testenv]deps}
- openstack-placement>=1.0.0
+ openstack-placement>=9.0.0.0b1
extras =
commands =
stestr --test-path=./nova/tests/functional run {posargs}
@@ -351,6 +345,7 @@ extension =
N369 = checks:check_lockutils_rwlocks
N370 = checks:check_six
N371 = checks:import_stock_mock
+ N372 = checks:check_set_daemon
paths =
./nova/hacking