-rw-r--r--.zuul.yaml45
-rw-r--r--HACKING.rst1
-rw-r--r--api-guide/source/accelerator-support.rst4
-rw-r--r--api-ref/source/parameters.yaml3
-rw-r--r--api-ref/source/servers-actions.inc5
-rw-r--r--api-ref/source/servers.inc7
-rw-r--r--doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json4
-rw-r--r--doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json5
-rw-r--r--doc/api_samples/servers/v2.94/server-action-rebuild-resp.json80
-rw-r--r--doc/api_samples/servers/v2.94/server-action-rebuild.json15
-rw-r--r--doc/api_samples/servers/v2.94/server-create-req.json30
-rw-r--r--doc/api_samples/servers/v2.94/server-create-resp.json22
-rw-r--r--doc/api_samples/servers/v2.94/server-get-resp.json81
-rw-r--r--doc/api_samples/servers/v2.94/server-update-req.json8
-rw-r--r--doc/api_samples/servers/v2.94/server-update-resp.json78
-rw-r--r--doc/api_samples/servers/v2.94/servers-details-resp.json88
-rw-r--r--doc/api_samples/servers/v2.94/servers-list-resp.json24
-rw-r--r--doc/api_samples/versions/v21-version-get-resp.json2
-rw-r--r--doc/api_samples/versions/versions-get-resp.json2
-rw-r--r--doc/source/admin/architecture.rst2
-rw-r--r--doc/source/admin/availability-zones.rst58
-rw-r--r--doc/source/admin/compute-node-identification.rst83
-rw-r--r--doc/source/admin/cpu-topologies.rst91
-rw-r--r--doc/source/admin/huge-pages.rst2
-rw-r--r--doc/source/admin/index.rst2
-rw-r--r--doc/source/admin/live-migration-usage.rst2
-rw-r--r--doc/source/admin/manage-logs.rst10
-rw-r--r--doc/source/admin/pci-passthrough.rst37
-rw-r--r--doc/source/admin/remote-console-access.rst10
-rw-r--r--doc/source/admin/scheduling.rst31
-rw-r--r--doc/source/admin/soft-delete-shadow-tables.rst62
-rw-r--r--doc/source/admin/upgrades.rst20
-rw-r--r--doc/source/cli/nova-compute.rst2
-rw-r--r--doc/source/contributor/ptl-guide.rst73
-rw-r--r--doc/source/reference/database-migrations.rst12
-rw-r--r--doc/source/reference/isolate-aggregates.rst2
-rw-r--r--etc/nova/nova-config-generator.conf1
-rw-r--r--mypy-files.txt4
-rw-r--r--nova/api/openstack/api_version_request.py6
-rw-r--r--nova/api/openstack/compute/evacuate.py25
-rw-r--r--nova/api/openstack/compute/flavor_access.py9
-rw-r--r--nova/api/openstack/compute/remote_consoles.py3
-rw-r--r--nova/api/openstack/compute/rest_api_version_history.rst20
-rw-r--r--nova/api/openstack/compute/schemas/evacuate.py4
-rw-r--r--nova/api/openstack/compute/schemas/servers.py14
-rw-r--r--nova/api/openstack/compute/servers.py9
-rw-r--r--nova/api/openstack/identity.py22
-rw-r--r--nova/cmd/manage.py4
-rw-r--r--nova/compute/api.py14
-rw-r--r--nova/compute/claims.py25
-rw-r--r--nova/compute/manager.py248
-rw-r--r--nova/compute/pci_placement_translator.py74
-rw-r--r--nova/compute/resource_tracker.py169
-rw-r--r--nova/compute/rpcapi.py18
-rw-r--r--nova/compute/utils.py27
-rw-r--r--nova/compute/vm_states.py3
-rw-r--r--nova/conductor/api.py6
-rw-r--r--nova/conductor/manager.py24
-rw-r--r--nova/conductor/rpcapi.py15
-rw-r--r--nova/conductor/tasks/live_migrate.py2
-rw-r--r--nova/conductor/tasks/migrate.py5
-rw-r--r--nova/conf/api.py7
-rw-r--r--nova/conf/compute.py9
-rw-r--r--nova/conf/libvirt.py18
-rw-r--r--nova/conf/pci.py30
-rw-r--r--nova/conf/scheduler.py64
-rw-r--r--nova/conf/spice.py53
-rw-r--r--nova/conf/workarounds.py44
-rw-r--r--nova/db/api/legacy_migrations/README4
-rw-r--r--nova/db/api/legacy_migrations/manage.py20
-rw-r--r--nova/db/api/legacy_migrations/migrate.cfg20
-rw-r--r--nova/db/api/legacy_migrations/versions/067_train.py602
-rw-r--r--nova/db/api/legacy_migrations/versions/068_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/069_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/070_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/071_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/072_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/073_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/074_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/075_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/076_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/077_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/078_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/079_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/080_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/081_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/082_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/083_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/084_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/085_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/086_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/087_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/README4
-rw-r--r--nova/db/main/legacy_migrations/__init__.py0
-rw-r--r--nova/db/main/legacy_migrations/manage.py20
-rw-r--r--nova/db/main/legacy_migrations/migrate.cfg20
-rw-r--r--nova/db/main/legacy_migrations/versions/402_train.py1619
-rw-r--r--nova/db/main/legacy_migrations/versions/403_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/404_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/405_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/406_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/407_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/408_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/409_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/410_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/411_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/412_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/413_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/414_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/415_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/416_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/417_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/418_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/419_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/420_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/421_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/422_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/__init__.py0
-rw-r--r--nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py35
-rw-r--r--nova/db/main/models.py2
-rw-r--r--nova/db/migration.py77
-rw-r--r--nova/exception.py25
-rw-r--r--nova/filesystem.py59
-rw-r--r--nova/hacking/checks.py21
-rw-r--r--nova/manager.py7
-rw-r--r--nova/network/neutron.py23
-rw-r--r--nova/objects/compute_node.py15
-rw-r--r--nova/objects/request_spec.py113
-rw-r--r--nova/objects/service.py35
-rw-r--r--nova/pci/request.py12
-rw-r--r--nova/pci/stats.py285
-rw-r--r--nova/pci/whitelist.py2
-rw-r--r--nova/policies/tenant_networks.py4
-rw-r--r--nova/policy.py12
-rw-r--r--nova/rpc.py16
-rw-r--r--nova/scheduler/client/report.py2
-rw-r--r--nova/scheduler/filters/__init__.py44
-rw-r--r--nova/scheduler/filters/numa_topology_filter.py24
-rw-r--r--nova/scheduler/filters/pci_passthrough_filter.py23
-rw-r--r--nova/scheduler/host_manager.py34
-rw-r--r--nova/scheduler/manager.py140
-rw-r--r--nova/scheduler/utils.py11
-rw-r--r--nova/scheduler/weights/hypervisor_version.py39
-rw-r--r--nova/service.py4
-rw-r--r--nova/test.py23
-rw-r--r--nova/tests/fixtures/__init__.py2
-rw-r--r--nova/tests/fixtures/filesystem.py81
-rw-r--r--nova/tests/fixtures/libvirt.py6
-rw-r--r--nova/tests/fixtures/libvirt_imagebackend.py3
-rw-r--r--nova/tests/fixtures/nova.py150
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl80
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl15
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl21
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl22
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl81
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl8
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl78
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl88
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl24
-rw-r--r--nova/tests/functional/api_sample_tests/test_evacuate.py52
-rw-r--r--nova/tests/functional/api_sample_tests/test_remote_consoles.py20
-rw-r--r--nova/tests/functional/api_sample_tests/test_servers.py45
-rw-r--r--nova/tests/functional/compute/test_resource_tracker.py1
-rw-r--r--nova/tests/functional/integrated_helpers.py16
-rw-r--r--nova/tests/functional/libvirt/base.py9
-rw-r--r--nova/tests/functional/libvirt/test_evacuate.py5
-rw-r--r--nova/tests/functional/libvirt/test_pci_in_placement.py471
-rw-r--r--nova/tests/functional/libvirt/test_pci_sriov_servers.py1182
-rw-r--r--nova/tests/functional/libvirt/test_power_manage.py270
-rw-r--r--nova/tests/functional/libvirt/test_vpmem.py7
-rw-r--r--nova/tests/functional/notification_sample_tests/test_compute_task.py7
-rw-r--r--nova/tests/functional/notification_sample_tests/test_instance.py20
-rw-r--r--nova/tests/functional/regressions/test_bug_1595962.py1
-rw-r--r--nova/tests/functional/regressions/test_bug_1669054.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1713783.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1764883.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1823370.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1896463.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1922053.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1995153.py107
-rw-r--r--nova/tests/functional/test_server_group.py57
-rw-r--r--nova/tests/functional/test_server_rescue.py86
-rw-r--r--nova/tests/functional/test_servers.py50
-rw-r--r--nova/tests/functional/test_servers_resource_request.py22
-rw-r--r--nova/tests/functional/test_service.py85
-rw-r--r--nova/tests/unit/api/openstack/compute/test_evacuate.py29
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_access.py25
-rw-r--r--nova/tests/unit/api/openstack/compute/test_remote_consoles.py12
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_groups.py97
-rw-r--r--nova/tests/unit/api/openstack/fakes.py14
-rw-r--r--nova/tests/unit/cmd/test_policy.py13
-rw-r--r--nova/tests/unit/compute/test_api.py157
-rw-r--r--nova/tests/unit/compute/test_claims.py6
-rw-r--r--nova/tests/unit/compute/test_compute.py63
-rw-r--r--nova/tests/unit/compute/test_compute_mgr.py429
-rw-r--r--nova/tests/unit/compute/test_pci_placement_translator.py87
-rw-r--r--nova/tests/unit/compute/test_resource_tracker.py236
-rw-r--r--nova/tests/unit/compute/test_rpcapi.py48
-rw-r--r--nova/tests/unit/compute/test_shelve.py16
-rw-r--r--nova/tests/unit/compute/test_utils.py68
-rw-r--r--nova/tests/unit/conductor/tasks/test_live_migrate.py2
-rw-r--r--nova/tests/unit/conductor/test_conductor.py33
-rw-r--r--nova/tests/unit/console/test_websocketproxy.py4
-rw-r--r--nova/tests/unit/db/api/test_migrations.py44
-rw-r--r--nova/tests/unit/db/main/test_migrations.py53
-rw-r--r--nova/tests/unit/db/test_migration.py189
-rw-r--r--nova/tests/unit/network/test_neutron.py18
-rw-r--r--nova/tests/unit/objects/test_compute_node.py17
-rw-r--r--nova/tests/unit/objects/test_request_spec.py233
-rw-r--r--nova/tests/unit/pci/test_request.py15
-rw-r--r--nova/tests/unit/pci/test_stats.py950
-rw-r--r--nova/tests/unit/policies/base.py10
-rw-r--r--nova/tests/unit/policies/test_evacuate.py2
-rw-r--r--nova/tests/unit/scheduler/fakes.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_numa_topology_filters.py97
-rw-r--r--nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py113
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py12
-rw-r--r--nova/tests/unit/scheduler/test_manager.py907
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py97
-rw-r--r--nova/tests/unit/test_filesystem.py52
-rw-r--r--nova/tests/unit/test_hacking.py21
-rw-r--r--nova/tests/unit/test_policy.py10
-rw-r--r--nova/tests/unit/test_rpc.py44
-rw-r--r--nova/tests/unit/test_service.py9
-rw-r--r--nova/tests/unit/virt/disk/test_api.py1
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py70
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/__init__.py (renamed from nova/db/api/legacy_migrations/__init__.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/test_api.py194
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/test_core.py122
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py32
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py312
-rw-r--r--nova/tests/unit/virt/libvirt/test_host.py70
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py43
-rw-r--r--nova/tests/unit/virt/test_hardware.py132
-rw-r--r--nova/tests/unit/virt/test_images.py46
-rw-r--r--nova/tests/unit/virt/test_node.py142
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py1
-rw-r--r--nova/utils.py47
-rw-r--r--nova/virt/driver.py6
-rw-r--r--nova/virt/fake.py37
-rw-r--r--nova/virt/hardware.py28
-rw-r--r--nova/virt/hyperv/driver.py8
-rw-r--r--nova/virt/images.py31
-rw-r--r--nova/virt/ironic/driver.py45
-rw-r--r--nova/virt/libvirt/config.py24
-rw-r--r--nova/virt/libvirt/cpu/__init__.py (renamed from nova/db/api/legacy_migrations/versions/__init__.py)0
-rw-r--r--nova/virt/libvirt/cpu/api.py157
-rw-r--r--nova/virt/libvirt/cpu/core.py78
-rw-r--r--nova/virt/libvirt/driver.py151
-rw-r--r--nova/virt/libvirt/event.py7
-rw-r--r--nova/virt/libvirt/host.py63
-rw-r--r--nova/virt/libvirt/imagebackend.py1
-rw-r--r--nova/virt/libvirt/utils.py75
-rw-r--r--nova/virt/node.py108
-rw-r--r--releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml23
-rw-r--r--releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml13
-rw-r--r--releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml51
-rw-r--r--releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml18
-rw-r--r--releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml8
-rw-r--r--releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml28
-rw-r--r--releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml23
-rw-r--r--releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml11
-rw-r--r--releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml6
-rw-r--r--releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml20
-rw-r--r--releasenotes/notes/microversion-2-94-59649401d5763286.yaml22
-rw-r--r--releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml6
-rw-r--r--releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml5
-rw-r--r--releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml6
-rw-r--r--releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml19
-rw-r--r--releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml12
-rw-r--r--releasenotes/source/2023.1.rst6
-rw-r--r--releasenotes/source/index.rst1
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po89
-rw-r--r--requirements.txt5
-rw-r--r--setup.cfg1
-rw-r--r--tox.ini15
278 files changed, 11241 insertions, 4872 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 434db83a7e..9c41476e68 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -419,6 +419,7 @@
# Added in Yoga.
NOVNC_FROM_PACKAGE: False
NOVA_USE_UNIFIED_LIMITS: True
+ MYSQL_REDUCE_MEMORY: True
devstack_services:
# Disable OVN services
br-ex-tcpdump: false
@@ -608,6 +609,7 @@
GLANCE_USE_IMPORT_WORKFLOW: True
DEVSTACK_PARALLEL: True
GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 2048
+ MYSQL_REDUCE_MEMORY: True
# NOTE(danms): This job is pretty heavy as it is, so we disable some
# services that are not relevant to the nova-glance-ceph scenario
# that this job is intended to validate.
@@ -656,6 +658,36 @@
image_conversion:
output_format: raw
+# TODO(gmann): Remove this job once all the required services for the
+# integrated compute gate (Cinder, Glance, Neutron) enable scope and new
+# defaults by default, which means all the nova jobs will be tested with the
+# new RBAC in an integrated way and we will not need this separate job.
+- job:
+ name: tempest-integrated-compute-enforce-scope-new-defaults
+ parent: tempest-integrated-compute
+ description: |
+ This job runs the Tempest tests with scope and new defaults enabled
+ for Nova, Neutron, Glance, and Cinder services.
+  # TODO (gmann): There were a few fixes in neutron and neutron-lib for the
+  # RBAC but they are not yet released, so we need to add both projects as
+  # required-projects. Those can be removed once new versions of neutron
+  # and neutron-lib are released.
+ required-projects:
+ - openstack/neutron
+ - openstack/neutron-lib
+ vars:
+ devstack_localrc:
+      # Enable the scope and new defaults for the services that implemented
+      # them.
+      # NOTE (gmann): We need to keep the keystone scope check disabled as
+      # services (except ironic) do not support the system scope and they
+      # need keystone to continue working with project scope. Until keystone
+      # policies are changed to also work with project scope, we need to keep
+      # the scope check disabled for keystone.
+ NOVA_ENFORCE_SCOPE: true
+ CINDER_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
- project:
# Please try to keep the list of job names sorted alphabetically.
templates:
@@ -692,10 +724,7 @@
voting: false
- nova-tox-functional-py38
- nova-tox-functional-py39
- - nova-tox-functional-py310:
- voting: true
- - openstack-tox-py310:
- voting: true
+ - nova-tox-functional-py310
- tempest-integrated-compute:
# NOTE(gmann): Policies changes do not need to run all the
# integration test jobs. Running only tempest and grenade
@@ -715,7 +744,9 @@
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
- - grenade-skip-level:
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
+ - grenade-skip-level-always:
irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
@@ -748,6 +779,10 @@
- ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
- tempest-integrated-compute:
irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
+ - grenade-skip-level-always:
+ irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
diff --git a/HACKING.rst b/HACKING.rst
index a2f67d993b..c5a1ba4ae3 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -76,6 +76,7 @@ Nova Specific Commandments
with eventlet patched code. Use nova.utils.ReaderWriterLock() instead.
- [N370] Don't use or import six
- [N371] You must explicitly import python's mock: ``from unittest import mock``
+- [N372] Don't use the setDaemon method. Use the daemon attribute instead.
Creating Unit Tests
-------------------
diff --git a/api-guide/source/accelerator-support.rst b/api-guide/source/accelerator-support.rst
index c71e899fd4..9d1b4d77b4 100644
--- a/api-guide/source/accelerator-support.rst
+++ b/api-guide/source/accelerator-support.rst
@@ -12,7 +12,7 @@ appropriate privileges) must do the following:
* Create a device profile in Cyborg, which specifies what accelerator
resources need to be provisioned. (See `Cyborg device profiles API`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
* Set the device profile name as an extra spec in a chosen flavor,
with this syntax:
@@ -102,7 +102,7 @@ appropriate privileges) must do the following:
resources need to be provisioned. (See `Cyborg device profiles API`_,
`Cyborg SRIOV Test Report`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
.. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic
* create a 'accelerator-direct' vnic type port with the device-profile name
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index aba8185a4b..e185dce29d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -6382,6 +6382,9 @@ server_hostname_req:
description: |
The hostname to configure for the instance in the metadata service.
+ Starting with microversion 2.94, this can be a Fully Qualified Domain Name
+ (FQDN) of up to 255 characters in length.
+
.. note::
This information is published via the metadata service and requires
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index 3b8b68d4ff..bb9953afa0 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -604,6 +604,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json
:language: javascript
+**Example Rebuild Server (rebuild Action) (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-action-rebuild.json
+ :language: javascript
+
Response
--------
diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc
index 547a71e914..e72d0641b9 100644
--- a/api-ref/source/servers.inc
+++ b/api-ref/source/servers.inc
@@ -448,6 +448,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
:language: javascript
+**Example Create Server With FQDN in Hostname (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-create-req.json
+ :language: javascript
+
Response
--------
@@ -610,7 +615,7 @@ Response
.. rest_parameters:: parameters.yaml
- - server: server
+ - servers: servers
- accessIPv4: accessIPv4
- accessIPv6: accessIPv6
- addresses: addresses
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..8ad929226e
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
@@ -0,0 +1,4 @@
+{
+ "evacuate": {
+ }
+}
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
new file mode 100644
index 0000000000..d192892cdc
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "host": "testHost"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
new file mode 100644
index 0000000000..7eeb568ea4
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild.json b/doc/api_samples/servers/v2.94/server-action-rebuild.json
new file mode 100644
index 0000000000..b5401ad9ca
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild.json
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-req.json b/doc/api_samples/servers/v2.94/server-create-req.json
new file mode 100644
index 0000000000..c6d4ce5640
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-req.json
@@ -0,0 +1,30 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg=="
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-resp.json b/doc/api_samples/servers/v2.94/server-create-resp.json
new file mode 100644
index 0000000000..f50e29dd8b
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-get-resp.json b/doc/api_samples/servers/v2.94/server-get-resp.json
new file mode 100644
index 0000000000..0a05b2f917
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-get-resp.json
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-req.json b/doc/api_samples/servers/v2.94/server-update-req.json
new file mode 100644
index 0000000000..1743f05fc7
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname" : "new-server-hostname.example.com"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-resp.json b/doc/api_samples/servers/v2.94/server-update-resp.json
new file mode 100644
index 0000000000..4aa834f9ec
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-resp.json
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/servers-details-resp.json b/doc/api_samples/servers/v2.94/servers-details-resp.json
new file mode 100644
index 0000000000..54b63fa523
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-details-resp.json
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.94/servers-list-resp.json b/doc/api_samples/servers/v2.94/servers-list-resp.json
new file mode 100644
index 0000000000..742d54b170
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 78678556bf..3f285e6017 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.93",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 59b67279b7..749fd4674f 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.93",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/source/admin/architecture.rst b/doc/source/admin/architecture.rst
index 69130122f7..f5e2b90dd9 100644
--- a/doc/source/admin/architecture.rst
+++ b/doc/source/admin/architecture.rst
@@ -173,7 +173,7 @@ is possible to configure other filesystem types.
.. rubric:: Cinder-provisioned block storage
-The OpenStack Block Storage service, Cinder, provides persistent volumes hat
+The OpenStack Block Storage service, Cinder, provides persistent volumes that
are represented by a persistent virtualized block device independent of any
particular instance.
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index ffe1be06f9..28c4451b60 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -39,7 +39,7 @@ when comparing availability zones and host aggregates:
The use of the default availability zone name in requests can be very
error-prone. Since the user can see the list of availability zones, they
have no way to know whether the default availability zone name (currently
- ``nova``) is provided because an host belongs to an aggregate whose AZ
+ ``nova``) is provided because a host belongs to an aggregate whose AZ
metadata key is set to ``nova``, or because there is at least one host
not belonging to any aggregate. Consequently, it is highly recommended
for users to never ever ask for booting an instance by specifying an
@@ -118,11 +118,47 @@ Implications for moving servers
There are several ways to move a server to another host: evacuate, resize,
cold migrate, live migrate, and unshelve. Move operations typically go through
-the scheduler to pick the target host *unless* a target host is specified and
-the request forces the server to that host by bypassing the scheduler. Only
-evacuate and live migrate can forcefully bypass the scheduler and move a
-server to a specified host and even then it is highly recommended to *not*
-force and bypass the scheduler.
+the scheduler to pick the target host.
+
+Prior to API microversion 2.68, using an older openstackclient (pre-5.5.0) or
+novaclient, it was possible to specify a target host and force the server to
+that host, bypassing the scheduler. Only evacuate and live migrate can
+forcefully bypass the scheduler and move a server to a specified host, and
+even then it is highly recommended to *not* force and bypass the scheduler.
+
+- live migrate with force host (works with older openstackclient, pre-5.5.0):
+
+.. code-block:: console
+
+ $ openstack server migrate --live <host> <server>
+
+- live migrate without forcing:
+
+.. code-block:: console
+
+ $ openstack server migrate --live-migration --host <host> <server>
+
+While support for the ``server evacuate`` command was added to
+openstackclient in 5.5.3, it never exposed the ability to force an
+evacuation; that was previously possible with novaclient.
+
+- evacuate with force host:
+
+.. code-block:: console
+
+ $ nova evacuate --force <server> <host>
+
+- evacuate without forcing using novaclient:
+
+.. code-block:: console
+
+   $ nova evacuate <server>
+
+- evacuate without forcing using openstackclient:
+
+.. code-block:: console
+
+ $ openstack server evacuate --host <host> <server>
With respect to availability zones, a server is restricted to a zone if:
@@ -150,16 +186,6 @@ If the server was not created in a specific zone then it is free to be moved
to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
is a no-op.
-Knowing this, it is dangerous to force a server to another host with evacuate
-or live migrate if the server is restricted to a zone and is then forced to
-move to a host in another zone, because that will create an inconsistency in
-the internal tracking of where that server should live and may require manually
-updating the database for that server. For example, if a user creates a server
-in zone A and then the admin force live migrates the server to zone B, and then
-the user resizes the server, the scheduler will try to move it back to zone A
-which may or may not work, e.g. if the admin deleted or renamed zone A in the
-interim.
-
Resource affinity
~~~~~~~~~~~~~~~~~
diff --git a/doc/source/admin/compute-node-identification.rst b/doc/source/admin/compute-node-identification.rst
new file mode 100644
index 0000000000..31d4802d0b
--- /dev/null
+++ b/doc/source/admin/compute-node-identification.rst
@@ -0,0 +1,83 @@
+===========================
+Compute Node Identification
+===========================
+
+Nova requires that compute nodes maintain a constant and consistent identity
+during their lifecycle. With the exception of the ironic driver, starting in
+the 2023.1 release, this is achieved by use of a file containing the node
+unique identifier that is persisted on disk. Prior to 2023.1, a combination of
+the compute node's hostname and the :oslo.config:option:`host` value in the
+configuration file was used.
+
+The 2023.1 and later compute node identification file must remain unchanged
+during the lifecycle of the compute node. Changing the value or removing the
+file will result in a failure to start and may require advanced techniques
+for recovery. The file is read once at ``nova-compute`` startup, at which point
+it is validated for formatting and the corresponding node is located or
+created in the database.
+
+.. note::
+
+   Even after 2023.1, the compute node's hostname may not be changed after
+   the initial registration with the controller nodes; it is just no longer
+   used as the primary method of identification.
+
+The behavior of ``nova-compute`` is different when using the ironic driver,
+as the (UUID-based) identity and mapping of compute nodes to compute manager
+service hosts is dynamic. In that case, no single node identity is maintained
+by the compute host and thus no identity file is read or written. None of
+the sections below apply to hosts with :oslo.config:option:`compute_driver`
+set to ``ironic``.
+
+Self-provisioning of the node identity
+--------------------------------------
+
+By default, ``nova-compute`` will automatically generate and write a UUID to
+disk the first time it starts up, and will use that going forward as its
+stable identity. Using the :oslo.config:option:`state_path`
+(which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be
+created with a generated UUID.
+
+Since this file (and its parent directory) is writable by nova, it may be
+desirable to move this to one of the other locations that nova looks for the
+identification file.
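+
+For instance, on a default installation the generated identity can be
+inspected as follows (the path assumes the default
+:oslo.config:option:`state_path` and the UUID shown is only an example):
+
+.. code-block:: console
+
+   $ cat /var/lib/nova/compute_id
+   0a5c7f3a-5f63-4f6b-9c2a-7d3b1c6f2e9d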
+
+Deployment provisioning of the node identity
+--------------------------------------------
+
+In addition to the location mentioned above, nova will also search the parent
+directories of any config file in use (either the defaults or provided on
+the command line) for a ``compute_id`` file. Thus, a deployment tool may, on
+most systems, pre-provision the node's UUID by writing one to
+``/etc/nova/compute_id``.
+
+The contents of the file should be a single UUID in canonical textual
+representation with no additional whitespace or other characters. The following
+should work on most Linux systems:
+
+.. code-block:: shell
+
+ $ uuidgen > /etc/nova/compute_id
+
+.. note::
+
+ **Do not** execute the above command blindly in every run of a deployment
+ tool, as that will result in overwriting the ``compute_id`` file each time,
+ which *will* prevent nova from working properly.
+
+Upgrading from pre-2023.1
+-------------------------
+
+Before release 2023.1, ``nova-compute`` only used the hostname (combined with
+:oslo.config:option:`host`, if set) to identify its compute node objects in
+the database. When upgrading from a prior release, the compute node will
+perform a one-time migration of the hostname-matched compute node UUID to the
+``compute_id`` file in the :oslo.config:option:`state_path` location.
+
+.. note::
+
+ It is imperative that you allow the above migration to run and complete on
+ compute nodes that are being upgraded. Skipping this step by
+ pre-provisioning a ``compute_id`` file before the upgrade will **not** work
+ and will be equivalent to changing the compute node UUID after it has
+ already been created once.
diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst
index 9770639c3a..082c88f655 100644
--- a/doc/source/admin/cpu-topologies.rst
+++ b/doc/source/admin/cpu-topologies.rst
@@ -730,6 +730,97 @@ CPU policy, meanwhile, will consume ``VCPU`` inventory.
.. _configure-hyperv-numa:
+Configuring CPU power management for dedicated cores
+----------------------------------------------------
+
+.. versionchanged:: 27.0.0
+
+ This feature was only introduced by the 2023.1 Antelope release
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt/KVM driver.
+
+For power saving reasons, operators can decide to turn down the power usage of
+CPU cores whether they are in use or not. For obvious reasons, Nova only
+allows changing the power consumption of a dedicated CPU core and not a
+shared one. Accordingly, usage of this feature relies on reading the
+:oslo.config:option:`compute.cpu_dedicated_set` config option to know which
+CPU cores to handle.
+The main action to enable the power management of dedicated cores is to set
+the :oslo.config:option:`libvirt.cpu_power_management` config option to
+``True``.
+
+By default, if this option is enabled, Nova will look up the dedicated cores
+and power them down at compute service startup. Then, once an instance that is
+pinned to a dedicated core starts, that core will be powered up right before
+the libvirt guest starts. Conversely, once an instance is stopped, migrated or
+deleted, the corresponding dedicated core will be powered down.
+
+There are two distinct strategies for powering up or down:
+
+- the default is to offline the CPU core and online it when needed.
+- an alternative strategy is to use two distinct CPU governors for the up state
+ and the down state.
+
+The strategy can be chosen using the
+:oslo.config:option:`libvirt.cpu_power_management_strategy` config option.
+``cpu_state`` supports the first online/offline strategy, while ``governor``
+sets the alternative strategy.
+We default to turning off the cores as that provides the best power savings,
+while other tools outside Nova, like tuned, could manage the governor.
+That being said, we also provide a way to automatically change the governors
+on the fly, as explained below.
+
+If the strategy is set to ``governor``, a couple of config options are
+provided to define which exact CPU governor to use for each of the up and down
+states:
+
+- :oslo.config:option:`libvirt.cpu_power_governor_low` will define the governor
+ to use for the powerdown state (defaults to ``powersave``)
+- :oslo.config:option:`libvirt.cpu_power_governor_high` will define the
+ governor to use for the powerup state (defaults to ``performance``)
+
+.. important::
+   It is the responsibility of the operator to ensure that the governors
+   defined by the configuration options are supported by the kernel of the OS
+   that runs the compute service.
+
+   As a side note, we recommend the ``schedutil`` governor as an alternative
+   for the high-power state (if the kernel supports it) as the CPU frequency
+   is dynamically set based on CPU task states. Other governors may be worth
+   testing, including ``conservative`` and ``ondemand``, which consume quite a
+   bit more power than ``schedutil`` but are more efficient than
+   ``performance``. See `Linux kernel docs`_ for further explanations.
+
+.. _`Linux kernel docs`: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+As an example, the relevant ``nova.conf`` configuration would look like::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=cpu_state
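+
+If you opt for the alternative ``governor`` strategy instead, a sketch of the
+equivalent configuration could look like the following (the governor values
+shown are simply the documented defaults)::
+
+   [compute]
+   cpu_dedicated_set=2-17
+
+   [libvirt]
+   cpu_power_management=True
+   cpu_power_management_strategy=governor
+   cpu_power_governor_low=powersave
+   cpu_power_governor_high=performance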
+
+.. warning::
+
+   The CPU core #0 has a special meaning in most recent Linux kernels. It is
+   always highly discouraged to use it for CPU pinning, and please refrain
+   from having it power managed, or you could be surprised if Nova turns it
+   off!
+
+One last important note: you can decide to change the CPU management strategy
+during the compute lifecycle, or you may already be managing the CPU states by
+other means. To ensure that Nova can correctly manage CPU performance, we
+added a couple of checks at startup that refuse to start the nova-compute
+service if these rules aren't enforced:
+
+- if the operator opts for the ``cpu_state`` strategy, then all dedicated CPU
+  governors *MUST* be identical.
+- if they opt for the ``governor`` strategy, then all dedicated CPU cores
+  *MUST* be online.
+
Configuring Hyper-V compute nodes for instance NUMA policies
------------------------------------------------------------
diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst
index 73f6c5dd2d..a451c6e3ab 100644
--- a/doc/source/admin/huge-pages.rst
+++ b/doc/source/admin/huge-pages.rst
@@ -96,7 +96,7 @@ pages at boot time, run:
.. code-block:: console
- # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' > /etc/default/grub
+ # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub
$ grep GRUB_CMDLINE_LINUX /etc/default/grub
GRUB_CMDLINE_LINUX="..."
GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 287e9d8fb5..8cb5bf7156 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -206,6 +206,7 @@ instance for these kind of workloads.
secure-boot
sev
managing-resource-providers
+ compute-node-identification
resource-limits
cpu-models
libvirt-misc
@@ -230,3 +231,4 @@ Once you are running nova, the following information is extremely useful.
node-down
hw-machine-type
hw-emulation-architecture
+ soft-delete-shadow-tables
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..32c67c2b0a 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -102,7 +102,7 @@ Manual selection of the destination host
.. code-block:: console
- $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live HostC
+ $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live-migration --host HostC
#. Confirm that the instance has been migrated successfully:
diff --git a/doc/source/admin/manage-logs.rst b/doc/source/admin/manage-logs.rst
index f60a523852..3a1546d8f4 100644
--- a/doc/source/admin/manage-logs.rst
+++ b/doc/source/admin/manage-logs.rst
@@ -181,12 +181,18 @@ websocket client to access the serial console.
.. rubric:: Accessing the serial console on an instance
-#. Use the :command:`nova get-serial-proxy` command to retrieve the websocket
+#. Use the :command:`nova get-serial-console` command to retrieve the websocket
URL for the serial console on the instance:
.. code-block:: console
- $ nova get-serial-proxy INSTANCE_NAME
+ $ nova get-serial-console INSTANCE_NAME
+
+ Or use the :command:`openstack console url show` command.
+
+ .. code-block:: console
+
+ $ openstack console url show --serial INSTANCE_NAME
.. list-table::
:header-rows: 0
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index de79792f16..09a963603d 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -65,6 +65,10 @@ capabilities.
:oslo.config:option:`pci.device_spec` configuration that uses the
``devname`` field.
+.. versionchanged:: 27.0.0 (2023.1 Antelope):
+   Nova provides Placement-based scheduling support for servers with
+   flavor-based PCI requests. This support is disabled by default.
+
Enabling PCI passthrough
------------------------
@@ -442,6 +446,39 @@ removed and VFs from the same PF is configured (or vice versa) then
nova-compute will refuse to start as it would create a situation where both
the PF and its VFs are made available for consumption.
+Since nova 27.0.0 (2023.1 Antelope) scheduling and allocation of PCI devices
+in Placement can also be enabled via
+:oslo.config:option:`filter_scheduler.pci_in_placement`. Please note that this
+should only be enabled after all the computes in the system are configured to
+report PCI inventory in Placement by enabling
+:oslo.config:option:`pci.report_in_placement`. In Antelope, flavor-based PCI
+requests are supported but Neutron port-based PCI requests are not handled in
+Placement.
+
+If you are upgrading from an earlier version and already have existing servers
+with PCI usage then you must first enable
+:oslo.config:option:`pci.report_in_placement` on all your computes having PCI
+allocations and restart the nova-compute service, before you enable
+:oslo.config:option:`filter_scheduler.pci_in_placement`. The compute service
+will heal the missing PCI allocations in placement during startup and will
+continue healing missing allocations for future servers until the scheduling
+support is enabled.
+
+If a flavor requests multiple ``type-VF`` devices via
+:nova:extra-spec:`pci_passthrough:alias` then it is important to consider the
+value of :nova:extra-spec:`group_policy` as well. The value ``none``
+allows nova to select VFs from the same parent PF to fulfill the request. The
+value ``isolate`` restricts nova to select each VF from a different parent PF
+to fulfill the request. If :nova:extra-spec:`group_policy` is not provided in
+such a flavor then it defaults to ``none``.
+
+Symmetrically with the ``resource_class`` and ``traits`` fields of
+:oslo.config:option:`pci.device_spec`, the :oslo.config:option:`pci.alias`
+configuration option supports requesting devices by Placement resource class
+name via the ``resource_class`` field and also supports requesting traits to
+be present on the selected devices via the ``traits`` field of the alias. If
+the ``resource_class`` field is not specified in the alias then it is
+defaulted by nova to ``CUSTOM_PCI_<vendor_id>_<product_id>``.
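+
+As an illustrative sketch only (the vendor and product IDs, the trait and the
+alias and resource class names below are placeholders, not a recommended
+setup), a device spec and a matching alias using these fields could look
+like::
+
+   [pci]
+   report_in_placement = True
+   device_spec = { "vendor_id": "dead", "product_id": "beef", "resource_class": "CUSTOM_MY_VF", "traits": "CUSTOM_FAST" }
+   alias = { "name": "my-vf", "device_type": "type-VF", "resource_class": "CUSTOM_MY_VF", "traits": "CUSTOM_FAST" }
+
+   [filter_scheduler]
+   pci_in_placement = True
+
+A flavor could then request two such VFs from different parent PFs via the
+:nova:extra-spec:`pci_passthrough:alias` extra spec, e.g.
+``pci_passthrough:alias="my-vf:2"`` combined with ``group_policy=isolate``.
+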
For deeper technical details please read the `nova specification. <https://specs.openstack.org/openstack/nova-specs/specs/zed/approved/pci-device-tracking-in-placement.html>`_
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index 015c6522d0..9b28646d27 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -366,6 +366,16 @@ Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
by the outside world. For example, this may be the management interface IP
address of the controller or the VIP.
+Optionally, the :program:`nova-compute` service supports the following
+additional options to configure compression settings (algorithms and modes)
+for SPICE consoles.
+
+- :oslo.config:option:`spice.image_compression`
+- :oslo.config:option:`spice.jpeg_compression`
+- :oslo.config:option:`spice.zlib_compression`
+- :oslo.config:option:`spice.playback_compression`
+- :oslo.config:option:`spice.streaming_mode`
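+
+As a sketch only (the values shown are illustrative; consult the option
+reference for the accepted values and defaults), these could be set as
+follows::
+
+   [spice]
+   image_compression = auto_glz
+   jpeg_compression = auto
+   zlib_compression = auto
+   playback_compression = True
+   streaming_mode = filter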
+
Serial
------
diff --git a/doc/source/admin/scheduling.rst b/doc/source/admin/scheduling.rst
index 9071c92ac9..353514ab55 100644
--- a/doc/source/admin/scheduling.rst
+++ b/doc/source/admin/scheduling.rst
@@ -1049,6 +1049,37 @@ Otherwise, it will fall back to the
more than one value is found for a host in aggregate metadata, the minimum
value will be used.
+``HypervisorVersionWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 28.0.0 (Bobcat)
+
+Weigh hosts by their relative hypervisor version reported by the virt driver.
+
+While the hypervisor_version field for all virt drivers is an int,
+each nova virt driver uses a different algorithm to convert the
+hypervisor-specific version sequence into an int. As such, the values are not
+directly comparable between hosts with different hypervisors.
+
+For example, the ironic virt driver uses the ironic API microversion as the
+hypervisor version for a given node, while the libvirt driver uses the libvirt
+version: libvirt `7.1.123` becomes `7001123`, whereas ironic `1.82` becomes
+`1` and Hyper-V `6.3` becomes `6003`.
+
+If you have a mixed virt driver deployment, in the ironic vs non-ironic
+case nothing special needs to be done. Ironic nodes are scheduled using custom
+resource classes, so ironic flavors will never match non-ironic compute nodes.
+
+If a deployment has multiple non-ironic virt drivers, it is recommended to use
+aggregates to group hosts by virt driver. While this is not strictly required,
+it is desirable to avoid bias towards one virt driver.
+See :ref:`filtering_hosts_by_isolating_aggregates` and :ref:`AggregateImagePropertiesIsolation`
+for more information.
+
+The default behavior of the HypervisorVersionWeigher is to prefer hosts with
+newer hypervisor versions. To invert the behavior, set the
+:oslo.config:option:`filter_scheduler.hypervisor_version_weight_multiplier` option
+to a negative number, and the weighing will have the opposite effect of the
+default.
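+
+For example (the value is illustrative; any negative multiplier inverts the
+preference towards older hypervisor versions):
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   hypervisor_version_weight_multiplier = -1.0
+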
Utilization-aware scheduling
----------------------------
diff --git a/doc/source/admin/soft-delete-shadow-tables.rst b/doc/source/admin/soft-delete-shadow-tables.rst
new file mode 100644
index 0000000000..126279c4d0
--- /dev/null
+++ b/doc/source/admin/soft-delete-shadow-tables.rst
@@ -0,0 +1,62 @@
+=============================
+Soft Delete and Shadow Tables
+=============================
+
+Nova has two unrelated features which are called ``soft delete``:
+
+Soft delete instances that can be restored
+------------------------------------------
+
+After an instance delete request, the actual delete is
+delayed by a configurable amount of time (config option
+:oslo.config:option:`reclaim_instance_interval`). During the delay,
+the instance is marked to be in state ``SOFT_DELETED`` and can be
+restored (:command:`openstack server restore`) by an admin in order to
+gracefully handle human mistakes. If the instance is not restored during
+the configured delay, a periodic job actually deletes the instance.
+
+This feature is optional and disabled by default.
+
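+A minimal sketch of enabling this in ``nova.conf`` (the interval is in seconds
+and the value shown is illustrative):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   reclaim_instance_interval = 3600
+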
+See also:
+
+- "Delete, Restore" in `API Guide: Server Concepts
+ <https://docs.openstack.org/api-guide/compute/server_concepts.html#server-actions>`_
+- config reference: :oslo.config:option:`reclaim_instance_interval`
+
+Soft delete database rows to shadow tables
+------------------------------------------
+
+At an actual instance delete, no DB record is deleted. Instead the
+records are marked as deleted (for example ``instances.deleted``
+in Nova cell databases). This preserves historic information
+for debugging and audit uses. But it also leads to accumulation
+of data in Nova cell DB tables, which may have an effect on
+Nova DB performance as documented in `DB prune deleted rows
+<https://docs.openstack.org/nova/latest/admin/upgrades.html#concepts>`_.
+
+The records marked as deleted can be cleaned up in multiple stages.
+First you can move them to so-called shadow tables (tables with the prefix
+``shadow_`` in Nova cell databases). This is called *archiving the
+deleted rows*. Nova does not query shadow tables, therefore data moved
+to the shadow tables no longer affects DB performance. However, storage
+space is still consumed. Then you can actually delete the information
+from the shadow tables. This is called *DB purge*.
+
+These operations can be performed by nova-manage:
+
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-archive-deleted-rows
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-purge
+
+This feature is not optional. Every long-running deployment should
+regularly archive and purge the deleted rows, for example via a cron
+job that regularly calls :program:`nova-manage db archive_deleted_rows` and
+:program:`nova-manage db purge`. The tradeoffs between data retention,
+DB performance and storage needs should be considered.
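+
+As an illustrative sketch only (the exact flags and the retention period are
+deployment choices; see the nova-manage documentation linked above):
+
+.. code-block:: console
+
+   # Example crontab entries: archive rows soft-deleted more than 30 days ago,
+   # then purge shadow table rows older than 30 days.
+   0 3 * * * nova-manage db archive_deleted_rows --before "$(date -d '-30 days' +\%F)" --until-complete
+   0 4 * * * nova-manage db purge --before "$(date -d '-30 days' +\%F)"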
+
+In the Mitaka release there was an agreement between Nova developers that
+it's not desirable to provide shadow tables for every table in the Nova
+database, `documented in a spec
+<https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/no-more-soft-delete.html>`_.
+
+Therefore not all information about an instance is preserved in the shadow
+tables. Since then, no new shadow tables have been introduced.
diff --git a/doc/source/admin/upgrades.rst b/doc/source/admin/upgrades.rst
index 00a714970b..61fd0cf258 100644
--- a/doc/source/admin/upgrades.rst
+++ b/doc/source/admin/upgrades.rst
@@ -41,21 +41,27 @@ Rolling upgrade process
To reduce downtime, the compute services can be upgraded in a rolling fashion.
It means upgrading a few services at a time. This results in a condition where
both old (N) and new (N+1) nova-compute services co-exist for a certain time
-period. Note that, there is no upgrade of the hypervisor here, this is just
+period (or even N with N+2 upgraded nova-compute services, see below).
+Note that there is no upgrade of the hypervisor here; this is just
upgrading the nova services. If reduced downtime is not a concern (or lower
complexity is desired), all services may be taken down and restarted at the
same time.
.. important::
- Nova does not currently support the coexistence of N and N+2 or greater
- :program:`nova-compute` or :program:`nova-conductor` services in the same
- deployment. The `nova-conductor`` service will fail to start when a
- ``nova-compute`` service that is older than the previous release (N-2 or
- greater) is detected. Similarly, in a :doc:`deployment with multiple cells
+ As of OpenStack 2023.1 (Antelope), Nova supports the coexistence of N and
+ N-2 (Yoga) :program:`nova-compute` or :program:`nova-conductor` services in
+ the same deployment. The ``nova-conductor`` service will fail to start when
+ a ``nova-compute`` service that is older than the support envelope is
+ detected. This varies by release and the support envelope will be explained
+ in the release notes. Similarly, in a :doc:`deployment with multiple cells
</admin/cells>`, neither the super conductor service nor any per-cell
conductor service will start if any other conductor service in the
- deployment is older than the previous release.
+ deployment is older than the N-2 release.
+
+ Releases older than 2023.1 will only support rolling upgrades for a single
+ release difference between :program:`nova-compute` and
+ :program:`nova-conductor` services.
#. Before maintenance window:
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f190949efa..1346dab92e 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -41,6 +41,8 @@ Files
* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
+* ``/etc/nova/compute_id``
+* ``/var/lib/nova/compute_id``
See Also
========
diff --git a/doc/source/contributor/ptl-guide.rst b/doc/source/contributor/ptl-guide.rst
index 813f1bc83e..b530b100bc 100644
--- a/doc/source/contributor/ptl-guide.rst
+++ b/doc/source/contributor/ptl-guide.rst
@@ -29,7 +29,11 @@ New PTL
* Get acquainted with the release schedule
- * Example: https://wiki.openstack.org/wiki/Nova/Stein_Release_Schedule
+ * Example: https://releases.openstack.org/antelope/schedule.html
+
+ * Also, note that we usually create a specific wiki page for each cycle like
+ https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule but it's
+ preferred to use the main release schedule above.
Project Team Gathering
----------------------
@@ -37,30 +41,34 @@ Project Team Gathering
* Create PTG planning etherpad, retrospective etherpad and alert about it in
nova meeting and dev mailing list
- * Example: https://etherpad.openstack.org/p/nova-ptg-stein
+ * Example: https://etherpad.opendev.org/p/nova-antelope-ptg
* Run sessions at the PTG
-* Have a priorities discussion at the PTG
+* Do a retro of the previous cycle
- * Example: https://etherpad.openstack.org/p/nova-ptg-stein-priorities
+* Agree on the agenda for this release, including but not limited to:
-* Sign up for group photo at the PTG (if applicable)
+ * Number of review days, for either specs or implementation
+ * Define the Spec approval and Feature freeze dates
+ * Modify the release schedule if needed by adding the new dates.
+ As an example: https://review.opendev.org/c/openstack/releases/+/877094
+
+* Discuss the implications of the current release being a `SLURP or non-SLURP`__ one
-* Open review runways for the cycle
+.. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
+
+* Sign up for group photo at the PTG (if applicable)
- * Example: https://etherpad.openstack.org/p/nova-runways-stein
After PTG
---------
* Send PTG session summaries to the dev mailing list
-* Make sure the cycle priorities spec gets reviewed and merged
-
- * Example: https://specs.openstack.org/openstack/nova-specs/priorities/stein-priorities.html
+* Add `RFE bugs`__ if you have action items that are simple to do but do not have an owner yet.
-* Run the count-blueprints script daily to gather data for the cycle burndown chart
+.. __: https://bugs.launchpad.net/nova/+bugs?field.tag=rfe
A few weeks before milestone 1
------------------------------
@@ -70,12 +78,13 @@ A few weeks before milestone 1
* Periodically check the series goals others have proposed in the “Set series
goals” link:
- * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
+ * Example: https://blueprints.launchpad.net/nova/antelope/+setgoals
Milestone 1
-----------
-* Do milestone release of nova and python-novaclient (in launchpad only)
+* Do a milestone release of nova and python-novaclient (in launchpad only;
+ optional)
* This is launchpad bookkeeping only. With the latest release team changes,
projects no longer do milestone releases. See: https://releases.openstack.org/reference/release_models.html#cycle-with-milestones-legacy
@@ -87,6 +96,8 @@ Milestone 1
the minor version to leave room for future stable branch releases
* os-vif
+ * placement
+ * os-traits / os-resource-classes
* Release stable branches of nova
@@ -117,28 +128,26 @@ Summit
* Prepare the on-boarding session materials. Enlist help of others
+* Prepare the operator meet-and-greet session. Enlist help of others
+
A few weeks before milestone 2
------------------------------
* Plan a spec review day (optional)
-* Periodically check the series goals others have proposed in the “Set series
- goals” link:
-
- * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
-
Milestone 2
-----------
-* Spec freeze
+* Spec freeze (if agreed)
-* Release nova and python-novaclient
+* Release nova and python-novaclient (if new features were merged)
* Release other libraries as needed
* Stable branch releases of nova
* For nova, set the launchpad milestone release as “released” with the date
+ (can be optional)
Shortly after spec freeze
-------------------------
@@ -146,7 +155,7 @@ Shortly after spec freeze
* Create a blueprint status etherpad to help track, especially non-priority
blueprint work, to help things get done by Feature Freeze (FF). Example:
- * https://etherpad.openstack.org/p/nova-stein-blueprint-status
+ * https://etherpad.opendev.org/p/nova-antelope-blueprint-status
* Create or review a patch to add the next release’s specs directory so people
can propose specs for next release after spec freeze for current release
@@ -155,13 +164,15 @@ Non-client library release freeze
---------------------------------
* Final release for os-vif
+* Final release for os-traits
+* Final release for os-resource-classes
Milestone 3
-----------
* Feature freeze day
-* Client library freeze, release python-novaclient
+* Client library freeze, release python-novaclient and osc-placement
* Close out all blueprints, including “catch all” blueprints like mox,
versioned notifications
@@ -170,7 +181,7 @@ Milestone 3
* For nova, set the launchpad milestone release as “released” with the date
-* Write the `cycle highlights
+* Start writing the `cycle highlights
<https://docs.openstack.org/project-team-guide/release-management.html#cycle-highlights>`__
Week following milestone 3
@@ -199,7 +210,7 @@ A few weeks before RC
* Make a RC1 todos etherpad and tag bugs as ``<release>-rc-potential`` and keep
track of them, example:
- * https://etherpad.openstack.org/p/nova-stein-rc-potential
+ * https://etherpad.opendev.org/p/nova-antelope-rc-potential
* Go through the bug list and identify any rc-potential bugs and tag them
@@ -242,7 +253,7 @@ RC
* Example: https://review.opendev.org/644412
-* Write the cycle-highlights in marketing-friendly sentences and propose to the
+* Push the cycle-highlights in marketing-friendly sentences and propose to the
openstack/releases repo. Usually based on reno prelude but made more readable
and friendly
@@ -257,11 +268,13 @@ Immediately after RC
* https://wiki.openstack.org/wiki/Nova/ReleaseChecklist
- * Drop old RPC compat code (if there was a RPC major version bump)
+ * Drop old RPC compat code (if there was an RPC major version bump and if
+ agreed on at the PTG)
* Example: https://review.opendev.org/543580
- * Bump the oldest supported compute service version
+ * Bump the oldest supported compute service version (if the master branch is
+ now on a non-SLURP version)
* https://review.opendev.org/#/c/738482/
@@ -275,7 +288,9 @@ Immediately after RC
* Set the previous to last series status to “supported”
-* Repeat launchpad steps ^ for python-novaclient
+* Repeat launchpad steps ^ for python-novaclient (optional)
+
+* Repeat launchpad steps ^ for placement
* Register milestones in launchpad for the new cycle based on the new cycle
release schedule
@@ -293,7 +308,7 @@ Immediately after RC
* Create new release wiki:
- * Example: https://wiki.openstack.org/wiki/Nova/Train_Release_Schedule
+ * Example: https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule
* Update the contributor guide for the new cycle
diff --git a/doc/source/reference/database-migrations.rst b/doc/source/reference/database-migrations.rst
index add7597e93..ea2b9050d9 100644
--- a/doc/source/reference/database-migrations.rst
+++ b/doc/source/reference/database-migrations.rst
@@ -24,6 +24,10 @@ Schema migrations
The database migration engine was changed from ``sqlalchemy-migrate`` to
``alembic``.
+.. versionchanged:: 27.0.0 (Antelope)
+
+ The legacy ``sqlalchemy-migrate``-based database migrations were removed.
+
The `alembic`__ database migration tool is used to manage schema migrations in
nova. The migration files and related metadata can be found in
``nova/db/api/migrations`` (for the API database) and
@@ -36,10 +40,10 @@ respectively.
.. note::
- There are also legacy migrations provided in the ``legacy_migrations``
- subdirectory for both the API and main databases. These are provided to
- facilitate upgrades from pre-Xena (24.0.0) deployments and will be removed
- in a future release. They should not be modified or extended.
+ There were also legacy migrations provided in the ``legacy_migrations``
+ subdirectory for both the API and main databases. These were provided to
+ facilitate upgrades from pre-Xena (24.0.0) deployments. They were removed
+ in the 27.0.0 (Antelope) release.
The best reference for alembic is the `alembic documentation`__, but a small
example is provided here. You can create the migration either manually or
diff --git a/doc/source/reference/isolate-aggregates.rst b/doc/source/reference/isolate-aggregates.rst
index f5487df912..7b493f4db9 100644
--- a/doc/source/reference/isolate-aggregates.rst
+++ b/doc/source/reference/isolate-aggregates.rst
@@ -13,6 +13,8 @@
License for the specific language governing permissions and limitations
under the License.
+.. _filtering_hosts_by_isolating_aggregates:
+
Filtering hosts by isolating aggregates
=======================================
diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf
index 742f348f11..8b7fd3bec8 100644
--- a/etc/nova/nova-config-generator.conf
+++ b/etc/nova/nova-config-generator.conf
@@ -16,3 +16,4 @@ namespace = oslo.concurrency
namespace = oslo.reports
namespace = keystonemiddleware.auth_token
namespace = osprofiler
+namespace = os_vif
diff --git a/mypy-files.txt b/mypy-files.txt
index 5a3b9ab339..391ed58d87 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -1,6 +1,7 @@
nova/compute/manager.py
nova/compute/pci_placement_translator.py
nova/crypto.py
+nova/filesystem.py
nova/limit/local.py
nova/limit/placement.py
nova/network/neutron.py
@@ -13,6 +14,9 @@ nova/virt/driver.py
nova/virt/hardware.py
nova/virt/libvirt/machine_type_utils.py
nova/virt/libvirt/__init__.py
+nova/virt/libvirt/cpu/__init__.py
+nova/virt/libvirt/cpu/api.py
+nova/virt/libvirt/cpu/core.py
nova/virt/libvirt/driver.py
nova/virt/libvirt/event.py
nova/virt/libvirt/guest.py
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index 84d8872f9e..718ac7e8e6 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -235,7 +235,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /flavors/{flavor_id}/os-extra_specs`` and
``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs.
* 2.87 - Adds support for rescuing boot from volume instances when the
- compute host reports the COMPUTE_BFV_RESCUE capability trait.
+ compute host reports the COMPUTE_RESCUE_BFV capability trait.
* 2.88 - Drop statistics-style fields from the ``/os-hypervisors/detail``
and ``/os-hypervisors/{hypervisor_id}`` APIs, and remove the
``/os-hypervisors/statistics`` and
@@ -253,6 +253,8 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /os-keypairs`` and allow including @ and dot (.) characters
in keypair name.
* 2.93 - Add support for volume backed server rebuild.
+ * 2.94 - Allow FQDN in server hostname.
+ * 2.95 - Evacuate will now stop the instance at the destination.
"""
# The minimum and maximum versions of the API supported
@@ -261,7 +263,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.93'
+_MAX_API_VERSION = '2.95'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/compute/evacuate.py b/nova/api/openstack/compute/evacuate.py
index aa35812759..a6602be079 100644
--- a/nova/api/openstack/compute/evacuate.py
+++ b/nova/api/openstack/compute/evacuate.py
@@ -23,9 +23,11 @@ from nova.api.openstack.compute.schemas import evacuate
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
+from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
+from nova import objects
from nova.policies import evacuate as evac_policies
from nova import utils
@@ -33,6 +35,8 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED = 62
+
class EvacuateController(wsgi.Controller):
def __init__(self):
@@ -77,7 +81,8 @@ class EvacuateController(wsgi.Controller):
@validation.schema(evacuate.evacuate, "2.0", "2.13")
@validation.schema(evacuate.evacuate_v214, "2.14", "2.28")
@validation.schema(evacuate.evacuate_v2_29, "2.29", "2.67")
- @validation.schema(evacuate.evacuate_v2_68, "2.68")
+ @validation.schema(evacuate.evacuate_v2_68, "2.68", "2.94")
+ @validation.schema(evacuate.evacuate_v2_95, "2.95")
def _evacuate(self, req, id, body):
"""Permit admins to evacuate a server from a failed host
to a new one.
@@ -92,6 +97,19 @@ class EvacuateController(wsgi.Controller):
host = evacuate_body.get("host")
force = None
+ target_state = None
+ if api_version_request.is_supported(req, min_version='2.95'):
+ min_ver = objects.service.get_minimum_version_all_cells(
+ context, ['nova-compute'])
+ if min_ver < MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED:
+ raise exception.NotSupportedComputeForEvacuateV295(
+ {'currently': min_ver,
+ 'expected': MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED})
+ # Starting with 2.95 any evacuated instance will be stopped at
+ # the destination. Previously an active or stopped instance would
+ # have kept its state.
+ target_state = vm_states.STOPPED
+
on_shared_storage = self._get_on_shared_storage(req, evacuate_body)
if api_version_request.is_supported(req, min_version='2.29'):
@@ -120,7 +138,8 @@ class EvacuateController(wsgi.Controller):
try:
self.compute_api.evacuate(context, instance, host,
- on_shared_storage, password, force)
+ on_shared_storage, password, force,
+ target_state)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate', id)
@@ -130,6 +149,8 @@ class EvacuateController(wsgi.Controller):
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
+ except exception.UnsupportedRPCVersion as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
if (not api_version_request.is_supported(req, min_version='2.14') and
CONF.api.enable_instance_password):
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index e17e6f0ddc..fc8df15db5 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -93,7 +93,14 @@ class FlavorActionController(wsgi.Controller):
vals = body['removeTenantAccess']
tenant = vals['tenant']
- identity.verify_project_id(context, tenant)
+ # It doesn't really matter if the project exists or not: we can
+ # delete it from the flavor's access list in either case.
+ try:
+ identity.verify_project_id(context, tenant)
+ except webob.exc.HTTPBadRequest as identity_exc:
+ msg = "Project ID %s is not a valid project." % tenant
+ if msg not in identity_exc.explanation:
+ raise
# NOTE(gibi): We have to load a flavor from the db here as
# flavor.remove_access() will try to emit a notification and that needs
diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py
index 36015542aa..7d374ef432 100644
--- a/nova/api/openstack/compute/remote_consoles.py
+++ b/nova/api/openstack/compute/remote_consoles.py
@@ -56,6 +56,9 @@ class RemoteConsolesController(wsgi.Controller):
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as e:
+ common.raise_http_conflict_for_instance_invalid_state(
+ e, 'get_vnc_console', id)
except NotImplementedError:
common.raise_feature_not_supported()
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index 642aefab49..c7a2777d3a 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1141,7 +1141,7 @@ Validation is only used for recognized extra spec namespaces, currently:
-------------------------------------
Adds support for rescuing boot from volume instances when the compute host
-reports the ``COMPUTE_BFV_RESCUE`` capability trait.
+reports the ``COMPUTE_RESCUE_BFV`` capability trait.
.. _microversion 2.88:
@@ -1229,3 +1229,21 @@ Add support for volume backed server rebuild. The end user will provide the
image with the rebuild command and it will rebuild the volume with the new
image similar to the result of rebuilding an ephemeral disk.
+
+2.94
+----
+
+The ``hostname`` parameter to the ``POST /servers`` (create server), ``PUT
+/servers/{id}`` (update server) and ``POST /servers/{server_id}/action
+(rebuild)`` (rebuild server) APIs is now allowed to be a Fully Qualified Domain
+Name (FQDN).
+
+.. _microversion 2.95:
+
+2.95 (Maximum in 2023.1 Antelope)
+---------------------------------
+
+Any evacuated instance will now be stopped at the destination. This
+requires a minimum nova release of 27.0.0 (OpenStack release 2023.1
+Antelope). Operators can still use a previous microversion to get the
+older behavior.
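+
+For illustration only, an evacuate call opting into this behavior simply
+sends the 2.95 microversion header (the endpoint, token and host values below
+are placeholders):
+
+.. code-block:: console
+
+   $ curl -X POST "$COMPUTE_ENDPOINT/servers/$SERVER_ID/action" \
+       -H "X-Auth-Token: $TOKEN" \
+       -H "Content-Type: application/json" \
+       -H "OpenStack-API-Version: compute 2.95" \
+       -d '{"evacuate": {"host": "target-compute"}}'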
diff --git a/nova/api/openstack/compute/schemas/evacuate.py b/nova/api/openstack/compute/schemas/evacuate.py
index a415a97f89..c7b84a655e 100644
--- a/nova/api/openstack/compute/schemas/evacuate.py
+++ b/nova/api/openstack/compute/schemas/evacuate.py
@@ -46,3 +46,7 @@ evacuate_v2_29['properties']['evacuate']['properties'][
# v2.68 removes the 'force' parameter added in v2.29, meaning it is identical
# to v2.14
evacuate_v2_68 = copy.deepcopy(evacuate_v214)
+
+# v2.95 keeps the same schema; evacuating an instance will now result in its
+# state being set to stopped at the destination.
+evacuate_v2_95 = copy.deepcopy(evacuate_v2_68)
diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py
index 300411de40..0869f83434 100644
--- a/nova/api/openstack/compute/schemas/servers.py
+++ b/nova/api/openstack/compute/schemas/servers.py
@@ -360,6 +360,11 @@ create_v290 = copy.deepcopy(create_v274)
create_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+# Support FQDN as hostname
+create_v294 = copy.deepcopy(create_v290)
+create_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
update = {
'type': 'object',
'properties': {
@@ -391,6 +396,11 @@ update_v290 = copy.deepcopy(update_v219)
update_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+
+update_v294 = copy.deepcopy(update_v290)
+update_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
rebuild = {
'type': 'object',
'properties': {
@@ -449,6 +459,10 @@ rebuild_v290 = copy.deepcopy(rebuild_v263)
rebuild_v290['properties']['rebuild']['properties'][
'hostname'] = parameter_types.hostname
+rebuild_v294 = copy.deepcopy(rebuild_v290)
+rebuild_v294['properties']['rebuild']['properties'][
+ 'hostname'] = parameter_types.fqdn
+
resize = {
'type': 'object',
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 946d5a7042..33e74456fd 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -677,7 +677,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.create_v263, '2.63', '2.66')
@validation.schema(schema_servers.create_v267, '2.67', '2.73')
@validation.schema(schema_servers.create_v274, '2.74', '2.89')
- @validation.schema(schema_servers.create_v290, '2.90')
+ @validation.schema(schema_servers.create_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.create_v294, '2.94')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
@@ -906,7 +907,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.update_v20, '2.0', '2.0')
@validation.schema(schema_servers.update, '2.1', '2.18')
@validation.schema(schema_servers.update_v219, '2.19', '2.89')
- @validation.schema(schema_servers.update_v290, '2.90')
+ @validation.schema(schema_servers.update_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.update_v294, '2.94')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
@@ -1147,7 +1149,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.rebuild_v254, '2.54', '2.56')
@validation.schema(schema_servers.rebuild_v257, '2.57', '2.62')
@validation.schema(schema_servers.rebuild_v263, '2.63', '2.89')
- @validation.schema(schema_servers.rebuild_v290, '2.90')
+ @validation.schema(schema_servers.rebuild_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.rebuild_v294, '2.94')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
diff --git a/nova/api/openstack/identity.py b/nova/api/openstack/identity.py
index 7ffc623fed..15ec884aea 100644
--- a/nova/api/openstack/identity.py
+++ b/nova/api/openstack/identity.py
@@ -27,24 +27,27 @@ def verify_project_id(context, project_id):
"""verify that a project_id exists.
This attempts to verify that a project id exists. If it does not,
- an HTTPBadRequest is emitted.
+ an HTTPBadRequest is emitted. An HTTPBadRequest is also emitted
+ if Keystone identity service version 3.0 is not found.
"""
adap = utils.get_ksa_adapter(
'identity', ksa_auth=context.get_auth_plugin(),
min_version=(3, 0), max_version=(3, 'latest'))
- failure = webob.exc.HTTPBadRequest(
- explanation=_("Project ID %s is not a valid project.") %
- project_id)
try:
resp = adap.get('/projects/%s' % project_id)
except kse.EndpointNotFound:
LOG.error(
- "Keystone identity service version 3.0 was not found. This might "
- "be because your endpoint points to the v2.0 versioned endpoint "
- "which is not supported. Please fix this.")
- raise failure
+ "Keystone identity service version 3.0 was not found. This "
+ "might be caused by Nova misconfiguration or Keystone "
+ "problems.")
+ msg = _("Nova was unable to find Keystone service endpoint.")
+ # TODO(astupnik): it may be reasonable to switch to HTTP 503
+ # (HTTP Service Unavailable) instead of HTTP Bad Request here.
+ # If the proper Keystone service is inaccessible, then technically
+ # this is a server-side error and not an error in Nova.
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException:
# something is wrong, like there isn't a keystone v3 endpoint,
# or nova isn't configured for the interface to talk to it;
@@ -57,7 +60,8 @@ def verify_project_id(context, project_id):
return True
elif resp.status_code == 404:
# we got access, and we know this project is not there
- raise failure
+ msg = _("Project ID %s is not a valid project.") % project_id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
elif resp.status_code == 403:
# we don't have enough permission to verify this, so default
# to "it's ok".
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index 08b8ebb310..45ae678ab4 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -122,6 +122,10 @@ def format_dict(dct, dict_property="Property", dict_value='Value',
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
+ # starting in PrettyTable 3.4.0 we need to also set the header
+ # alignment, as align now only applies to the data.
+ if hasattr(pt, 'header_align'):
+ pt.header_align = 'l'
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index b2884543e5..6b2023c19f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3797,7 +3797,8 @@ class API:
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
request_spec=request_spec,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=None)
def _check_volume_status(self, context, bdms):
"""Check whether the status of the volume is "in-use".
@@ -4699,6 +4700,7 @@ class API:
allow_bfv_rescue=False):
"""Rescue the given instance."""
+ image_meta = None
if rescue_image_ref:
try:
image_meta = image_meta_obj.ImageMeta.from_image_ref(
@@ -4719,6 +4721,8 @@ class API:
"image properties set")
raise exception.UnsupportedRescueImage(
image=rescue_image_ref)
+ else:
+ image_meta = instance.image_meta
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4727,6 +4731,9 @@ class API:
volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
+ allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \
+ 'hw_rescue_device' in image_meta.properties
+
if volume_backed and allow_bfv_rescue:
cn = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
@@ -5611,7 +5618,7 @@ class API:
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR], task_state=None)
def evacuate(self, context, instance, host, on_shared_storage,
- admin_password=None, force=None):
+ admin_password=None, force=None, target_state=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
@@ -5622,6 +5629,7 @@ class API:
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
:param force: Force the evacuation to the specific host target
+ :param target_state: Set a target state for the evacuated instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
@@ -5685,7 +5693,7 @@ class API:
on_shared_storage=on_shared_storage,
host=host,
request_spec=request_spec,
- )
+ target_state=target_state)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 79e8f2f012..490b418081 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -124,7 +124,13 @@ class Claim(NopClaim):
pci_requests = self._pci_requests
if pci_requests.requests:
stats = self.tracker.pci_tracker.stats
- if not stats.support_requests(pci_requests.requests):
+ if not stats.support_requests(
+ pci_requests.requests,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+ # carry its own placement provider mapping information
+ provider_mapping=None,
+ ):
return _('Claim pci failed')
def _test_numa_topology(self, compute_node, limit):
@@ -139,12 +145,17 @@ class Claim(NopClaim):
if pci_requests.requests:
pci_stats = self.tracker.pci_tracker.stats
- instance_topology = (
- hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limit,
- pci_requests=pci_requests.requests,
- pci_stats=pci_stats))
+ instance_topology = hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limit,
+ pci_requests=pci_requests.requests,
+ pci_stats=pci_stats,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+ # carry its own placement provider mapping information
+ provider_mapping=None,
+ )
if requested_topology and not instance_topology:
if pci_requests.requests:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index f25d037c50..5c42aa4d89 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -84,6 +84,7 @@ from nova.objects import external_event as external_event_obj
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_req_module
from nova.pci import whitelist
from nova import safe_utils
@@ -96,6 +97,7 @@ from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
+import nova.virt.node
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
@@ -616,7 +618,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='6.1')
+ target = messaging.Target(version='6.2')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1470,31 +1472,111 @@ class ComputeManager(manager.Manager):
:return: a dict of ComputeNode objects keyed by the UUID of the given
node.
"""
- nodes_by_uuid = {}
try:
- node_names = self.driver.get_available_nodes()
+ node_ids = self.driver.get_nodenames_by_uuid()
except exception.VirtDriverNotReady:
LOG.warning(
"Virt driver is not ready. If this is the first time this "
- "service is starting on this host, then you can ignore this "
- "warning.")
+ "service is starting on this host, then you can ignore "
+ "this warning.")
return {}
- for node_name in node_names:
- try:
- node = objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, node_name)
- nodes_by_uuid[node.uuid] = node
- except exception.ComputeHostNotFound:
- LOG.warning(
- "Compute node %s not found in the database. If this is "
- "the first time this service is starting on this host, "
- "then you can ignore this warning.", node_name)
- return nodes_by_uuid
+ nodes = objects.ComputeNodeList.get_all_by_uuids(context,
+ list(node_ids.keys()))
+ if not nodes:
+ # NOTE(danms): This should only happen if the compute_id is
+ # pre-provisioned on a host that has never started.
+ LOG.warning('Compute nodes %s for host %s were not found in the '
+ 'database. If this is the first time this service is '
+ 'starting on this host, then you can ignore this '
+ 'warning.',
+ list(node_ids.keys()), self.host)
+ return {}
+
+ for node in nodes:
+ if node.hypervisor_hostname != node_ids.get(node.uuid):
+ raise exception.InvalidConfiguration(
+ ('My compute node %s has hypervisor_hostname %s '
+ 'but virt driver reports it should be %s. Possible '
+ 'rename detected, refusing to start!') % (
+ node.uuid, node.hypervisor_hostname,
+ node_ids.get(node.uuid)))
+
+ return {n.uuid: n for n in nodes}
+
+ def _ensure_existing_node_identity(self, service_ref):
+ """If we are upgrading from an older service version, we need
+ to write our node identity uuid (if not already done) based on
+ nodes assigned to us in the database.
+ """
+ if 'ironic' in CONF.compute_driver.lower():
+ # We do not persist a single local node identity for
+ # ironic
+ return
+
+ if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
+ # Already new enough, nothing to do here, but make sure that we
+ # have a UUID file already, as this is not our first time starting.
+ if nova.virt.node.read_local_node_uuid() is None:
+ raise exception.InvalidConfiguration(
+ ('No local node identity found, but this is not our '
+ 'first startup on this host. Refusing to start after '
+ 'potentially having lost that state!'))
+ return
- def init_host(self):
+ if nova.virt.node.read_local_node_uuid():
+ # We already have a local node identity, no migration needed
+ return
+
+ context = nova.context.get_admin_context()
+ db_nodes = objects.ComputeNodeList.get_all_by_host(context, self.host)
+ if not db_nodes:
+ # This means we have no nodes in the database (that we
+ # know of) and thus have no need to record an existing
+ # UUID. That is probably strange, so refuse to start.
+ raise exception.InvalidConfiguration(
+ ('Upgrading from service version %i but found no '
+ 'nodes in the database for host %s to persist '
+ 'locally; Possible rename detected, '
+ 'refusing to start!') % (
+ service_ref.version, self.host))
+
+ if len(db_nodes) > 1:
+ # If this happens we can't do the right thing, so raise an
+ # exception to abort host startup
+ LOG.warning('Multiple nodes found in the database for host %s; '
+ 'unable to persist local node identity automatically',
+ self.host)
+ raise exception.InvalidConfiguration(
+ 'Multiple nodes found in database, manual node uuid '
+ 'configuration required')
+
+ nova.virt.node.write_local_node_uuid(db_nodes[0].uuid)
+
+ def _check_for_host_rename(self, nodes_by_uuid):
+ if 'ironic' in CONF.compute_driver.lower():
+ # Ironic (currently) rebalances nodes at various times, and as
+ # such, nodes being discovered as assigned to this host with a
+ # different hostname is not surprising. Skip this check for
+ # ironic.
+ return
+ for node in nodes_by_uuid.values():
+ if node.host != self.host:
+ raise exception.InvalidConfiguration(
+ 'My node %s has host %r but my host is %r; '
+ 'Possible rename detected, refusing to start!' % (
+ node.uuid, node.host, self.host))
+ LOG.debug('Verified node %s matches my host %s',
+ node.uuid, self.host)
+
+ def init_host(self, service_ref):
"""Initialization for a standalone compute service."""
+ if service_ref:
+ # If we are an existing service, check to see if we need
+ # to record a locally-persistent node identity because
+ # we have upgraded from a previous version.
+ self._ensure_existing_node_identity(service_ref)
+
if CONF.pci.device_spec:
# Simply loading the PCI passthrough spec will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
@@ -1524,7 +1606,18 @@ class ComputeManager(manager.Manager):
raise exception.InvalidConfiguration(msg)
self.driver.init_host(host=self.host)
+
+ # NOTE(gibi): At this point the compute_nodes of the resource tracker
+ # has not been populated yet so we cannot rely on the resource tracker
+ # here.
context = nova.context.get_admin_context()
+ nodes_by_uuid = self._get_nodes(context)
+
+ # NOTE(danms): Check for a possible host rename and abort
+ # startup before we start mucking with instances we think are
+ # ours.
+ self._check_for_host_rename(nodes_by_uuid)
+
instances = objects.InstanceList.get_by_host(
context, self.host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
@@ -1534,17 +1627,12 @@ class ComputeManager(manager.Manager):
self._validate_pinning_configuration(instances)
self._validate_vtpm_configuration(instances)
- # NOTE(gibi): At this point the compute_nodes of the resource tracker
- # has not been populated yet so we cannot rely on the resource tracker
- # here.
# NOTE(gibi): If ironic and vcenter virt driver slow start time
# becomes problematic here then we should consider adding a config
# option or a driver flag to tell us if we should thread
# _destroy_evacuated_instances and
# _error_out_instances_whose_build_was_interrupted out in the
# background on startup
- nodes_by_uuid = self._get_nodes(context)
-
try:
# checking that instance was not already evacuated to other host
evacuated_instances = self._destroy_evacuated_instances(
@@ -2471,10 +2559,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils\
- .update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -2736,7 +2826,8 @@ class ComputeManager(manager.Manager):
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
- exception.UnexpectedDeletingTaskStateError):
+ exception.UnexpectedDeletingTaskStateError,
+ exception.ComputeResourcesUnavailable):
with excutils.save_and_reraise_exception():
self._build_resources_cleanup(instance, network_info)
except (exception.UnexpectedTaskStateError,
@@ -3621,7 +3712,7 @@ class ComputeManager(manager.Manager):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -3656,6 +3747,7 @@ class ComputeManager(manager.Manager):
:param reimage_boot_volume: Boolean to specify whether the user has
explicitly requested to rebuild a boot
volume
+ :param target_state: Set a target state for the evacuated instance.
"""
# recreate=True means the instance is being evacuated from a failed
@@ -3699,9 +3791,21 @@ class ComputeManager(manager.Manager):
try:
compute_node = self._get_compute_info(context, self.host)
scheduled_node = compute_node.hypervisor_hostname
- except exception.ComputeHostNotFound:
+ except exception.ComputeHostNotFound as e:
+ # This means we were asked to rebuild one of our own
+ # instances, or another instance as a target of an
+ # evacuation, but we are unable to find a matching compute
+ # node.
LOG.exception('Failed to get compute_info for %s',
self.host)
+ self._set_migration_status(migration, 'failed')
+ self._notify_instance_rebuild_error(context, instance, e,
+ bdms)
+ raise exception.InstanceFaultRollback(
+ inner_exception=exception.BuildAbortException(
+ instance_uuid=instance.uuid,
+ reason=e.format_message()))
+
else:
scheduled_node = instance.node
@@ -3720,7 +3824,8 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass, orig_sys_metadata,
bdms, evacuate, on_shared_storage, preserve_ephemeral,
migration, request_spec, allocs, rebuild_claim,
- scheduled_node, limits, accel_uuids, reimage_boot_volume)
+ scheduled_node, limits, accel_uuids, reimage_boot_volume,
+ target_state)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
@@ -3780,7 +3885,7 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, rebuild_claim, scheduled_node, limits, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
"""Helper to avoid deep nesting in the top-level method."""
provider_mapping = None
@@ -3788,10 +3893,12 @@ class ComputeManager(manager.Manager):
provider_mapping = self._get_request_group_mapping(request_spec)
if provider_mapping:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
claim_context = rebuild_claim(
context, instance, scheduled_node, allocations,
@@ -3802,7 +3909,8 @@ class ComputeManager(manager.Manager):
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations,
- provider_mapping, accel_uuids, reimage_boot_volume)
+ provider_mapping, accel_uuids, reimage_boot_volume,
+ target_state)
@staticmethod
def _get_image_name(image_meta):
@@ -3816,10 +3924,18 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, request_group_resource_providers_mapping,
- accel_uuids, reimage_boot_volume):
+ accel_uuids, reimage_boot_volume, target_state):
orig_vm_state = instance.vm_state
if evacuate:
+ if target_state and orig_vm_state != vm_states.ERROR:
+ # This will ensure that at destination the instance will have
+ # the desired state.
+ if target_state not in vm_states.ALLOW_TARGET_STATES:
+ raise exception.InstanceEvacuateNotSupportedTargetState(
+ target_state=target_state)
+ orig_vm_state = target_state
+
if request_spec:
# NOTE(gibi): Do a late check of server group policy as
# parallel scheduling could violate such policy. This will
@@ -5414,10 +5530,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -5520,7 +5638,7 @@ class ComputeManager(manager.Manager):
clean_shutdown)
except exception.BuildAbortException:
# NOTE(gibi): We failed
- # update_pci_request_spec_with_allocated_interface_name so
+ # update_pci_request_with_placement_allocations so
# there is no reason to re-schedule. Just revert the allocation
# and fail the migration.
with excutils.save_and_reraise_exception():
@@ -5651,7 +5769,7 @@ class ComputeManager(manager.Manager):
'host (%s).', self.host, instance=instance)
self._send_prep_resize_notifications(
ctxt, instance, fields.NotificationPhase.START, flavor)
- # TODO(mriedem): update_pci_request_spec_with_allocated_interface_name
+ # TODO(mriedem): update_pci_request_with_placement_allocations
# should be called here if the request spec has request group mappings,
# e.g. for things like QoS ports with resource requests. Do it outside
# the try/except so if it raises BuildAbortException we do not attempt
@@ -6768,6 +6886,9 @@ class ComputeManager(manager.Manager):
current_power_state = self._get_power_state(instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
+ ports_id = [vif['id'] for vif in network_info]
+ self.network_api.unbind_ports(context, ports_id, detach=False)
+
block_device_info = self._get_instance_block_device_info(context,
instance,
bdms=bdms)
@@ -6898,12 +7019,12 @@ class ComputeManager(manager.Manager):
try:
if provider_mappings:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, instance.pci_requests.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mappings,
+ )
accel_info = []
if accel_uuids:
@@ -7983,12 +8104,12 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid) from e
try:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, pci_reqs.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ pci_reqs.requests,
+ provider_mappings,
+ )
except (
exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
@@ -8807,7 +8928,8 @@ class ComputeManager(manager.Manager):
# in order to be able to track and abort it in the future.
self._waiting_live_migrations[instance.uuid] = (None, None)
try:
- future = self._live_migration_executor.submit(
+ future = nova.utils.pass_context(
+ self._live_migration_executor.submit,
self._do_live_migration, context, dest, instance,
block_migration, migration, migrate_data)
self._waiting_live_migrations[instance.uuid] = (migration, future)
@@ -10091,7 +10213,9 @@ class ComputeManager(manager.Manager):
else:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
- self._sync_power_pool.spawn_n(_sync, db_instance)
+ nova.utils.pass_context(self._sync_power_pool.spawn_n,
+ _sync,
+ db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
@@ -10374,6 +10498,14 @@ class ComputeManager(manager.Manager):
LOG.exception(
"Error updating PCI resources for node %(node)s.",
{'node': nodename})
+ except exception.InvalidConfiguration as e:
+ if startup:
+ # If this happens during startup, we need to let it raise to
+ # abort our service startup.
+ raise
+ else:
+ LOG.error("Error updating resources for node %s: %s",
+ nodename, e)
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
@@ -11290,7 +11422,7 @@ class _ComputeV5Proxy(object):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
- accel_uuids, False)
+ accel_uuids, False, None)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
diff --git a/nova/compute/pci_placement_translator.py b/nova/compute/pci_placement_translator.py
index d6d7fdd6f1..016efd9122 100644
--- a/nova/compute/pci_placement_translator.py
+++ b/nova/compute/pci_placement_translator.py
@@ -65,20 +65,50 @@ def _normalize_traits(traits: ty.List[str]) -> ty.List[str]:
return list(standard_traits) + custom_traits
-def _get_traits_for_dev(
- dev_spec_tags: ty.Dict[str, str],
-) -> ty.Set[str]:
+def get_traits(traits_str: str) -> ty.Set[str]:
+ """Return a normalized set of placement standard and custom traits from
+ a string of comma separated trait names.
+ """
# traits is a comma separated list of placement trait names
- traits_str = dev_spec_tags.get("traits")
if not traits_str:
- return {os_traits.COMPUTE_MANAGED_PCI_DEVICE}
+ return set()
+ return set(_normalize_traits(traits_str.split(',')))
- traits = traits_str.split(',')
- return set(_normalize_traits(traits)) | {
+
+def _get_traits_for_dev(
+ dev_spec_tags: ty.Dict[str, str],
+) -> ty.Set[str]:
+ return get_traits(dev_spec_tags.get("traits", "")) | {
os_traits.COMPUTE_MANAGED_PCI_DEVICE
}
+def _normalize_resource_class(rc: str) -> str:
+ rc = rc.upper()
+ if (
+ rc not in os_resource_classes.STANDARDS and
+ not os_resource_classes.is_custom(rc)
+ ):
+ rc = os_resource_classes.normalize_name(rc)
+ # mypy: normalize_name will return non None for non None input
+ assert rc
+
+ return rc
+
+
+def get_resource_class(
+ requested_name: ty.Optional[str], vendor_id: str, product_id: str
+) -> str:
+ """Return the normalized resource class name based on what is requested
+ or, if nothing is requested, generated from the vendor_id and product_id.
+ """
+ if requested_name:
+ rc = _normalize_resource_class(requested_name)
+ else:
+ rc = f"CUSTOM_PCI_{vendor_id}_{product_id}".upper()
+ return rc
+
+
def _get_rc_for_dev(
dev: pci_device.PciDevice,
dev_spec_tags: ty.Dict[str, str],
@@ -91,23 +121,8 @@ def _get_rc_for_dev(
The user specified resource class is normalized if it is not already an
acceptable standard or custom resource class.
"""
- # Either use the resource class from the config or the vendor_id and
- # product_id of the device to generate the RC
rc = dev_spec_tags.get("resource_class")
- if rc:
- rc = rc.upper()
- if (
- rc not in os_resource_classes.STANDARDS and
- not os_resource_classes.is_custom(rc)
- ):
- rc = os_resource_classes.normalize_name(rc)
- # mypy: normalize_name will return non None for non None input
- assert rc
-
- else:
- rc = f"CUSTOM_PCI_{dev.vendor_id}_{dev.product_id}".upper()
-
- return rc
+ return get_resource_class(rc, dev.vendor_id, dev.product_id)
class PciResourceProvider:
@@ -246,6 +261,12 @@ class PciResourceProvider:
)
provider_tree.update_traits(self.name, self.traits)
+ # Here we are sure the RP exists in the provider_tree. So, we can
+ # record the RP UUID in each PciDevice this RP represents
+ rp_uuid = provider_tree.data(self.name).uuid
+ for dev in self.devs:
+ dev.extra_info['rp_uuid'] = rp_uuid
+
def update_allocations(
self,
allocations: dict,
@@ -583,12 +604,17 @@ def update_provider_tree_for_pci(
pv.update_provider_tree(provider_tree)
old_alloc = copy.deepcopy(allocations)
+ # update_provider_tree correlated the PciDevice objects with RPs in
+ # placement and recorded the RP UUID in the PciDevice object. We need to
+ # trigger an update on the device pools in the tracker to get the device
+ # RP UUID mapped to the device pools
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices()
updated = pv.update_allocations(allocations, provider_tree)
if updated:
LOG.debug(
"Placement PCI view needs allocation healing. This should only "
- "happen if [scheduler]pci_in_placement is still disabled. "
+ "happen if [filter_scheduler]pci_in_placement is still disabled. "
"Original allocations: %s New allocations: %s",
old_alloc,
allocations,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index ffbc7ed03f..9ee6670c17 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -49,6 +49,7 @@ from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
+from nova.virt import node
CONF = nova.conf.CONF
@@ -145,16 +146,20 @@ class ResourceTracker(object):
during the instance build.
"""
if self.disabled(nodename):
- # instance_claim() was called before update_available_resource()
- # (which ensures that a compute node exists for nodename). We
- # shouldn't get here but in case we do, just set the instance's
- # host and nodename attribute (probably incorrect) and return a
- # NoopClaim.
- # TODO(jaypipes): Remove all the disabled junk from the resource
- # tracker. Servicegroup API-level active-checking belongs in the
- # nova-compute manager.
- self._set_instance_host_and_node(instance, nodename)
- return claims.NopClaim()
+ # If we get here, it means we are trying to claim for an instance
+ # that was scheduled to a node that we do not have in our list,
+ # or is in some other way unmanageable by this node. This would
+ # mean that we are unable to account for resources, create
+ # allocations in placement, or do any of the other accounting
+ # necessary for this to work. In the past, this situation was
+ # effectively ignored silently, but in a world where we track
+ # resources with placement and instance assignment to compute nodes
+ # by service, we can no longer be leaky.
+ raise exception.ComputeResourcesUnavailable(
+ ('Attempt to claim resources for instance %(inst)s '
+ 'on unknown node %(node)s failed') % {
+ 'inst': instance.uuid,
+ 'node': nodename})
# sanity checks:
if instance.host:
@@ -279,9 +284,17 @@ class ResourceTracker(object):
context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
- # compute_driver doesn't support resource tracking, just
- # generate the migration record and continue the resize:
- return claims.NopClaim(migration=migration)
+ # This means we were asked to accept an incoming migration to a
+ # node that we do not own or track. We really should not get here,
+ # but if we do, we must refuse to continue with the migration
+ # process, since we cannot account for those resources, create
+ # allocations in placement, etc. This has been a silent resource
+ # leak in the past, but it must be a hard failure now.
+ raise exception.ComputeResourcesUnavailable(
+ ('Attempt to claim move resources for instance %(inst)s on '
+ 'unknown node %(node)s failed') % {
+ 'inst': instance.uuid,
+                'node': nodename})
cn = self.compute_nodes[nodename]
@@ -619,18 +632,11 @@ class ResourceTracker(object):
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
- # Remove usage for an instance that is tracked in migrations, such as
- # on the dest node during revert resize.
- if instance['uuid'] in self.tracked_migrations:
- migration = self.tracked_migrations.pop(instance['uuid'])
+ if instance["uuid"] in self.tracked_migrations:
if not flavor:
- flavor = self._get_flavor(instance, prefix, migration)
- # Remove usage for an instance that is not tracked in migrations (such
- # as on the source node after a migration).
- # NOTE(lbeliveau): On resize on the same node, the instance is
- # included in both tracked_migrations and tracked_instances.
- elif instance['uuid'] in self.tracked_instances:
- self.tracked_instances.remove(instance['uuid'])
+ flavor = self._get_flavor(
+ instance, prefix, self.tracked_migrations[instance["uuid"]]
+ )
if flavor is not None:
numa_topology = self._get_migration_context_resource(
@@ -646,6 +652,15 @@ class ResourceTracker(object):
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
+ # Remove usage for an instance that is tracked in migrations, such as
+ # on the dest node during revert resize.
+ self.tracked_migrations.pop(instance['uuid'], None)
+ # Remove usage for an instance that is not tracked in migrations (such
+ # as on the source node after a migration).
+ # NOTE(lbeliveau): On resize on the same node, the instance is
+ # included in both tracked_migrations and tracked_instances.
+ self.tracked_instances.discard(instance['uuid'])
+
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
@@ -666,50 +681,6 @@ class ResourceTracker(object):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
- def _check_for_nodes_rebalance(self, context, resources, nodename):
- """Check if nodes rebalance has happened.
-
- The ironic driver maintains a hash ring mapping bare metal nodes
- to compute nodes. If a compute dies, the hash ring is rebuilt, and
- some of its bare metal nodes (more precisely, those not in ACTIVE
- state) are assigned to other computes.
-
- This method checks for this condition and adjusts the database
- accordingly.
-
- :param context: security context
- :param resources: initial values
- :param nodename: node name
- :returns: True if a suitable compute node record was found, else False
- """
- if not self.driver.rebalances_nodes:
- return False
-
- # Its possible ironic just did a node re-balance, so let's
- # check if there is a compute node that already has the correct
- # hypervisor_hostname. We can re-use that rather than create a
- # new one and have to move existing placement allocations
- cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
- context, nodename)
-
- if len(cn_candidates) == 1:
- cn = cn_candidates[0]
- LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
- {"name": nodename, "old": cn.host, "new": self.host})
- cn.host = self.host
- self.compute_nodes[nodename] = cn
- self._copy_resources(cn, resources)
- self._setup_pci_tracker(context, cn, resources)
- self._update(context, cn)
- return True
- elif len(cn_candidates) > 1:
- LOG.error(
- "Found more than one ComputeNode for nodename %s. "
- "Please clean up the orphaned ComputeNode records in your DB.",
- nodename)
-
- return False
-
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
@@ -727,6 +698,7 @@ class ResourceTracker(object):
False otherwise
"""
nodename = resources['hypervisor_hostname']
+ node_uuid = resources['uuid']
# if there is already a compute node just use resources
# to initialize
@@ -738,23 +710,43 @@ class ResourceTracker(object):
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
- cn = self._get_compute_node(context, nodename)
+
+ # We use read_deleted=True so that we will find and recover a deleted
+ # node object, if necessary.
+ with utils.temporary_mutation(context, read_deleted='yes'):
+ cn = self._get_compute_node(context, node_uuid)
+ if cn and cn.deleted:
+ # Undelete and save this right now so that everything below
+ # can continue without read_deleted=yes
+ LOG.info('Undeleting compute node %s', cn.uuid)
+ cn.deleted = False
+ cn.deleted_at = None
+ cn.save()
if cn:
+ if cn.host != self.host:
+ LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
+ {"name": nodename, "old": cn.host, "new": self.host})
+ cn.host = self.host
+ self._update(context, cn)
+
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
- if self._check_for_nodes_rebalance(context, resources, nodename):
- return False
-
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
- cn.create()
+ try:
+ cn.create()
+ except exception.DuplicateRecord:
+ raise exception.InvalidConfiguration(
+ 'Duplicate compute node record found for host %s node %s' % (
+ cn.host, cn.hypervisor_hostname))
+
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
@@ -887,6 +879,14 @@ class ResourceTracker(object):
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
+ if 'uuid' not in resources:
+ # NOTE(danms): Any driver that does not provide a uuid per
+ # node gets the locally-persistent compute_id. Only ironic
+ # should be setting the per-node uuid (and returning
+ # multiple nodes in general). If this is the first time we
+ # are creating a compute node on this host, we will
+ # generate and persist this uuid for the future.
+ resources['uuid'] = node.get_local_node_uuid()
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
@@ -991,8 +991,6 @@ class ResourceTracker(object):
# notified when instances are deleted, we need remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations)
- dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
- cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
@@ -1014,14 +1012,13 @@ class ResourceTracker(object):
if startup:
self._check_resources(context)
- def _get_compute_node(self, context, nodename):
+ def _get_compute_node(self, context, node_uuid):
"""Returns compute node for the host and nodename."""
try:
- return objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, nodename)
+ return objects.ComputeNode.get_by_uuid(context, node_uuid)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
- {'host': self.host, 'node': nodename})
+ {'host': self.host, 'node': node_uuid})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
@@ -1314,13 +1311,23 @@ class ResourceTracker(object):
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
+
+ self._update_to_placement(context, compute_node, startup)
+
+ if self.pci_tracker:
+ # sync PCI device pool state stored in the compute node with
+ # the actual state from the PCI tracker as we commit changes in
+ # the DB and in the PCI tracker below
+ dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
+ compute_node.pci_device_pools = dev_pools_obj
+
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB. Note that
- # _update_to_placement below does not supersede the need to do this
+ # _update_to_placement above does not supersede the need to do this
# because there are stats-related fields in the ComputeNode object
# which could have changed and still need to be reported to the
# scheduler filters/weighers (which could be out of tree as well).
@@ -1333,8 +1340,6 @@ class ResourceTracker(object):
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
- self._update_to_placement(context, compute_node, startup)
-
if self.pci_tracker:
self.pci_tracker.save(context)
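
The tracker now looks compute nodes up by UUID and, when the virt driver does not report a per-node UUID, falls back to ``node.get_local_node_uuid()``, i.e. an identity that is generated once and persisted on the host. A hedged sketch of that get-or-create pattern; the file name and location are assumptions for illustration, not necessarily what ``nova.virt.node`` uses.

.. code-block:: python

    import os
    import uuid

    def get_or_create_local_node_uuid(state_path: str = "/var/lib/nova") -> str:
        """Return a node UUID persisted on local disk, creating it on first use."""
        identity_file = os.path.join(state_path, "compute_id")  # assumed path
        if os.path.exists(identity_file):
            with open(identity_file) as f:
                return f.read().strip()
        node_uuid = str(uuid.uuid4())
        with open(identity_file, "w") as f:
            f.write(node_uuid + "\n")
        return node_uuid
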
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 3ac4c6dcfa..efc06300db 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -403,6 +403,7 @@ class ComputeAPI(object):
* ... - Rename the instance_type argument of resize_instance() to
flavor
* 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
+ * 6.2 - Add target_state parameter to rebuild_instance()
'''
VERSION_ALIASES = {
@@ -424,6 +425,7 @@ class ComputeAPI(object):
'xena': '6.0',
'yoga': '6.0',
'zed': '6.1',
+ 'antelope': '6.2',
}
@property
@@ -1083,7 +1085,7 @@ class ComputeAPI(object):
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate, on_shared_storage, host, node,
preserve_ephemeral, migration, limits, request_spec, accel_uuids,
- reimage_boot_volume):
+ reimage_boot_volume, target_state):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
@@ -1096,12 +1098,20 @@ class ComputeAPI(object):
'limits': limits,
'request_spec': request_spec,
'accel_uuids': accel_uuids,
- 'reimage_boot_volume': reimage_boot_volume
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
-
- version = '6.1'
+ version = '6.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
+ if msg_args['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance",
+ required="6.2")
+ else:
+ del msg_args['target_state']
+ version = '6.1'
+ if not client.can_send_version(version):
if msg_args['reimage_boot_volume']:
raise exception.NovaException(
'Compute RPC version does not support '
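
The version negotiation above follows the usual compute RPC pattern: try the newest version first and, if the peer is pinned to an older one, either drop the new argument when it is unset or refuse the call when the caller actually needs the new behaviour. A stripped-down sketch of that cascade with stand-in names:

.. code-block:: python

    class RPCVersionTooOld(Exception):
        pass

    def negotiate_rebuild_args(can_send_version, msg_args):
        """Walk down known versions, dropping args the peer cannot understand."""
        version = '6.2'
        if not can_send_version(version):
            if msg_args.get('target_state'):
                raise RPCVersionTooOld('rebuild_instance needs RPC 6.2')
            msg_args.pop('target_state', None)
            version = '6.1'
        if not can_send_version(version):
            if msg_args.get('reimage_boot_volume'):
                raise RPCVersionTooOld('rebuild_instance needs RPC 6.1')
            msg_args.pop('reimage_boot_volume', None)
            version = '6.0'
        return version, msg_args

    # A peer pinned to 6.0 with neither new feature requested degrades cleanly:
    print(negotiate_rebuild_args(
        lambda v: v == '6.0',
        {'target_state': None, 'reimage_boot_volume': False}))
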
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 036e69b7ce..30efc24fc7 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -1491,7 +1491,7 @@ def notify_about_instance_delete(notifier, context, instance,
phase=fields.NotificationPhase.END)
-def update_pci_request_spec_with_allocated_interface_name(
+def update_pci_request_with_placement_allocations(
context, report_client, pci_requests, provider_mapping):
"""Update the instance's PCI request based on the request group -
resource provider mapping and the device RP name from placement.
@@ -1512,12 +1512,33 @@ def update_pci_request_spec_with_allocated_interface_name(
if not pci_requests:
return
- def needs_update(pci_request, mapping):
+ def needs_update_due_to_qos(pci_request, mapping):
return (pci_request.requester_id and
pci_request.requester_id in mapping)
+ def get_group_mapping_for_flavor_based_pci_request(pci_request, mapping):
+        # NOTE(gibi): for flavor-based PCI requests nova generates RequestGroup
+        # suffixes from InstancePCIRequests in the form of
+        # {request_id}-{count_index}
+        # NOTE(gibi): a suffixed request group is always fulfilled from a
+        # single RP
+ return {
+ group_id: rp_uuids[0]
+ for group_id, rp_uuids in mapping.items()
+ if group_id.startswith(pci_request.request_id)
+ }
+
for pci_request in pci_requests:
- if needs_update(pci_request, provider_mapping):
+ mapping = get_group_mapping_for_flavor_based_pci_request(
+ pci_request, provider_mapping)
+
+ if mapping:
+ for spec in pci_request.spec:
+ # FIXME(gibi): this is baaad but spec is a dict of strings so
+ # we need to serialize
+ spec['rp_uuids'] = ','.join(mapping.values())
+
+ elif needs_update_due_to_qos(pci_request, provider_mapping):
provider_uuids = provider_mapping[pci_request.requester_id]
if len(provider_uuids) != 1:
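
For flavor-based PCI requests the provider mapping keys are the suffixed request group names (``{request_id}-{count_index}``), and each such group is resolved by exactly one resource provider. A small sketch of how the per-request mapping is filtered out of the overall provider mapping, using made-up identifiers:

.. code-block:: python

    provider_mapping = {
        "port-uuid-1": ["rp-net-1"],   # QoS port group, keyed by requester_id
        "pci-req-1-0": ["rp-pci-a"],   # flavor-based PCI groups, keyed by
        "pci-req-1-1": ["rp-pci-b"],   # request_id-count_index
    }

    def groups_for_pci_request(request_id, mapping):
        return {
            group_id: rp_uuids[0]  # a suffixed group maps to a single RP
            for group_id, rp_uuids in mapping.items()
            if group_id.startswith(request_id)
        }

    print(groups_for_pci_request("pci-req-1", provider_mapping))
    # {'pci-req-1-0': 'rp-pci-a', 'pci-req-1-1': 'rp-pci-b'}
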
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 633894c1ea..1a916ea59a 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -76,3 +76,6 @@ ALLOW_TRIGGER_CRASH_DUMP = [ACTIVE, PAUSED, RESCUED, RESIZED, ERROR]
# states we allow resources to be freed in
ALLOW_RESOURCE_REMOVAL = [DELETED, SHELVED_OFFLOADED]
+
+# states we allow for evacuate instance
+ALLOW_TARGET_STATES = [STOPPED]
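
``ALLOW_TARGET_STATES`` backs the new ``target_state`` parameter of evacuate: only the listed states (currently just ``stopped``) may be requested as the post-evacuation state. A trivial validation sketch; the exception type is a stand-in.

.. code-block:: python

    ALLOW_TARGET_STATES = ['stopped']

    def validate_target_state(target_state):
        # None means "keep the default behaviour"; anything else must be listed.
        if target_state is not None and target_state not in ALLOW_TARGET_STATES:
            raise ValueError(f'target_state {target_state!r} is not allowed')
        return target_state

    validate_target_state('stopped')  # ok
    validate_target_state(None)       # ok
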
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 778fdd6c73..843c8ce3a3 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -144,7 +144,8 @@ class ComputeTaskAPI(object):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None,
- request_spec=None, reimage_boot_volume=False):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
@@ -158,7 +159,8 @@ class ComputeTaskAPI(object):
preserve_ephemeral=preserve_ephemeral,
host=host,
request_spec=request_spec,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def cache_images(self, context, aggregate, image_ids):
"""Request images be pre-cached on hosts within an aggregate.
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9e822db081..4b34b8339c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -235,7 +235,7 @@ class ComputeTaskManager:
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.24')
+ target = messaging.Target(namespace='compute_task', version='1.25')
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -1037,6 +1037,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+            # NOTE(gibi): as PCI devices are tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
@@ -1146,7 +1152,8 @@ class ComputeTaskManager:
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
- request_spec=None, reimage_boot_volume=False):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
@@ -1242,6 +1249,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+        # NOTE(gibi): as PCI devices are tracked in placement we need to
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
try:
# if this is a rebuild of instance on the same host with
# new image.
@@ -1344,7 +1357,8 @@ class ComputeTaskManager:
limits=limits,
request_spec=request_spec,
accel_uuids=accel_uuids,
- reimage_boot_volume=reimage_boot_volume)
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
@@ -2082,8 +2096,8 @@ class ComputeTaskManager:
skipped_host(target_ctxt, host, image_ids)
continue
- fetch_pool.spawn_n(wrap_cache_images, target_ctxt, host,
- image_ids)
+ utils.pass_context(fetch_pool.spawn_n, wrap_cache_images,
+ target_ctxt, host, image_ids)
# Wait until all those things finish
fetch_pool.waitall()
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index ffaecd2c95..a5f0cf0094 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -287,6 +287,7 @@ class ComputeTaskAPI(object):
1.22 - Added confirm_snapshot_based_resize()
1.23 - Added revert_snapshot_based_resize()
1.24 - Add reimage_boot_volume parameter to rebuild_instance()
+ 1.25 - Add target_state parameter to rebuild_instance()
"""
def __init__(self):
@@ -428,8 +429,8 @@ class ComputeTaskAPI(object):
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, request_spec=None,
- reimage_boot_volume=False):
- version = '1.24'
+ reimage_boot_volume=False, target_state=None):
+ version = '1.25'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
@@ -442,9 +443,17 @@ class ComputeTaskAPI(object):
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
- 'reimage_boot_volume': reimage_boot_volume
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
if not self.client.can_send_version(version):
+ if kw['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance", required="1.25")
+ else:
+ del kw['target_state']
+ version = '1.24'
+ if not self.client.can_send_version(version):
if kw['reimage_boot_volume']:
raise exception.NovaException(
'Conductor RPC version does not support '
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index f8819b0dc8..cca97c53f7 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -542,7 +542,7 @@ class LiveMigrationTask(base.TaskBase):
# will be persisted when post_live_migration_at_destination
# runs.
compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
+ update_pci_request_with_placement_allocations(
self.context, self.report_client,
self.instance.pci_requests.requests, provider_mapping)
try:
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 8838d0240a..754f9e5ba7 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -258,6 +258,11 @@ class MigrationTask(base.TaskBase):
# resource requests in a single list and add them to the RequestSpec.
self.request_spec.requested_resources = port_res_req
self.request_spec.request_level_params = req_lvl_params
+        # NOTE(gibi): as PCI devices are tracked in placement we need to
+ # generate request groups from InstancePCIRequests. This will append
+ # new RequestGroup objects to the request_spec.requested_resources list
+ # if needed
+ self.request_spec.generate_request_groups_from_pci_requests()
self._set_requested_destination_cell(legacy_props)
diff --git a/nova/conf/api.py b/nova/conf/api.py
index 5c8a367e8e..58cbc4931e 100644
--- a/nova/conf/api.py
+++ b/nova/conf/api.py
@@ -225,8 +225,11 @@ service.
help="""
Domain name used to configure FQDN for instances.
-Configure a fully-qualified domain name for instance hostnames. If unset, only
-the hostname without a domain will be configured.
+Configure a fully-qualified domain name for instance hostnames. The value is
+appended as a suffix to the instance hostname stored in the database in order
+to construct the hostname returned by the metadata API. To disable this
+behavior (for example, to correctly support the FQDN hostnames introduced in
+microversion 2.94), set this option to the empty string.
Possible values:
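
The updated help text describes plain suffixing: when the option is non-empty, the configured domain is joined to the hostname stored in the database to form the name returned by the metadata API. In effect (a sketch of the described behaviour, not the actual metadata code):

.. code-block:: python

    def metadata_hostname(db_hostname: str, domain: str) -> str:
        # An empty domain disables suffixing, which is what users of
        # microversion 2.94 FQDN hostnames are advised to configure.
        return f"{db_hostname}.{domain}" if domain else db_hostname

    print(metadata_hostname("vm-1", "example.com"))   # vm-1.example.com
    print(metadata_hostname("vm-1.example.com", ""))  # vm-1.example.com
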
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 224c802935..de2743d850 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -1016,6 +1016,15 @@ Related options:
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
+ cfg.ListOpt('vmdk_allowed_types',
+ default=['streamOptimized', 'monolithicSparse'],
+ help="""
+A list of VMDK "create-type" subformats that will be allowed. It is
+recommended to only include single-file-with-sparse-header variants to avoid
+potential host file exposure due to processing named extents. If this list is
+empty, then no form of VMDK image will be allowed.
+"""),
cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
default=False,
help="""
diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 16a3f63090..204fe5c4b8 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -1478,6 +1478,23 @@ Related options:
"""),
]
+libvirt_cpu_mgmt_opts = [
+ cfg.BoolOpt('cpu_power_management',
+ default=False,
+                help='Use libvirt to manage CPU core performance.'),
+ cfg.StrOpt('cpu_power_management_strategy',
+ choices=['cpu_state', 'governor'],
+ default='cpu_state',
+               help='Tuning strategy to reduce CPU power consumption when '
+                    'cores are unused'),
+ cfg.StrOpt('cpu_power_governor_low',
+ default='powersave',
+ help='Governor to use in order '
+ 'to reduce CPU power consumption'),
+ cfg.StrOpt('cpu_power_governor_high',
+ default='performance',
+ help='Governor to use in order to have best CPU performance'),
+]
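
The new ``cpu_power_management*`` options describe two strategies for dedicated CPUs: toggling the core's online state, or keeping it online and switching its governor between a low-power and a high-performance profile. A rough sketch of the decision those options imply; the helper name and return shape are illustrative only.

.. code-block:: python

    def desired_cpu_state(core_in_use, strategy,
                          governor_low='powersave', governor_high='performance'):
        """Return what should be applied to a dedicated core for the strategy."""
        if strategy == 'cpu_state':
            # Online the core only while an instance's dedicated vCPU uses it.
            return ('online', None) if core_in_use else ('offline', None)
        if strategy == 'governor':
            # Keep the core online and switch its cpufreq governor instead.
            return ('online', governor_high if core_in_use else governor_low)
        raise ValueError(f'unknown strategy {strategy!r}')

    print(desired_cpu_state(True, 'governor'))    # ('online', 'performance')
    print(desired_cpu_state(False, 'cpu_state'))  # ('offline', None)
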
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
@@ -1499,6 +1516,7 @@ ALL_OPTS = list(itertools.chain(
libvirt_volume_nvmeof_opts,
libvirt_pmem_opts,
libvirt_vtpm_opts,
+ libvirt_cpu_mgmt_opts,
))
diff --git a/nova/conf/pci.py b/nova/conf/pci.py
index 673185391b..533bf52ead 100644
--- a/nova/conf/pci.py
+++ b/nova/conf/pci.py
@@ -67,6 +67,36 @@ Possible Values:
Required NUMA affinity of device. Valid values are: ``legacy``,
``preferred`` and ``required``.
+ ``resource_class``
+ The optional Placement resource class name that is used
+ to track the requested PCI devices in Placement. It can be a standard
+    resource class from the ``os-resource-classes`` lib, or it can be an
+    arbitrary string. If it is a non-standard resource class then Nova will
+    normalize it to a proper Placement resource class by
+    making it upper case, replacing any consecutive character outside of
+    ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if
+    not yet prefixed. The maximum allowed length is 255 characters including
+    the prefix. If ``resource_class`` is not provided, Nova will generate it
+    from the ``vendor_id`` and ``product_id`` values of the alias in the form
+    of ``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class``
+    requested in the alias is matched against the ``resource_class`` defined
+    in the ``[pci]device_spec``. This field can only be used if
+    ``[filter_scheduler]pci_in_placement`` is enabled.
+
+ ``traits``
+    An optional comma-separated list of Placement trait names requested to be
+    present on the resource provider that fulfills this alias. Each trait can
+    be a standard trait from the ``os-traits`` lib or it can be an arbitrary
+    string. If it is a non-standard trait then Nova will normalize the
+    trait name by making it upper case, replacing any consecutive character
+    outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name
+    with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a
+    trait name is 255 characters including the prefix. Every trait in
+    ``traits`` requested in the alias is ensured to be in the list of traits
+    provided in the ``traits`` field of the ``[pci]device_spec`` when
+    scheduling the request. This field can only be used if
+    ``[filter_scheduler]pci_in_placement`` is enabled.
+
* Supports multiple aliases by repeating the option (not by specifying
a list value)::
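
Putting the two new fields together, an alias entry can pin both a resource class and a set of traits. A hypothetical value, built here in Python only to show the JSON shape; the name, resource class and trait names are made up:

.. code-block:: python

    import json

    alias = {
        "name": "my-gpu",                                 # made-up alias name
        "resource_class": "CUSTOM_MY_GPU_CLASS",          # made-up resource class
        "traits": "CUSTOM_MY_TRAIT_A,CUSTOM_MY_TRAIT_B",  # made-up traits
    }
    print(json.dumps(alias))
    # The resulting JSON string would be the value of a [pci]alias entry.
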
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index 03e78fe701..c7aa2ad76d 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -464,6 +464,49 @@ Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
"""),
+ cfg.FloatOpt("hypervisor_version_weight_multiplier",
+ default=1.0,
+ help="""
+Hypervisor Version weight multiplier ratio.
+
+The multiplier is used for weighting hosts based on the reported
+hypervisor version.
+Negative numbers indicate preferring older hosts; the default is to prefer
+newer hosts to aid with upgrades.
+
+Possible values:
+
+* An integer or float value, where the value corresponds to the multiplier
+ ratio for this weigher.
+
+Example:
+
+* Strongly prefer older hosts
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=-1000
+
+
+* Moderately prefer new hosts
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=2.5
+
+* Disable weigher influence
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=0
+
+Related options:
+
+* ``[filter_scheduler] weight_classes``
+"""),
cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
help="""
@@ -745,7 +788,26 @@ Possible values:
Related options:
* ``[filter_scheduler] aggregate_image_properties_isolation_namespace``
-""")]
+"""),
+ cfg.BoolOpt(
+ "pci_in_placement",
+ default=False,
+ help="""
+Enable scheduling and claiming PCI devices in Placement.
+
+This can be enabled after ``[pci]report_in_placement`` is enabled on all
+compute hosts.
+
+When enabled, the scheduler queries Placement about PCI device availability to
+select a destination for a server with a PCI request. The scheduler also
+allocates the selected PCI devices in Placement. Note that this logic does not
+replace the PCIPassthroughFilter but extends it.
+
+Related options:
+
+* ``[pci] report_in_placement``
+* ``[pci] alias``
+* ``[pci] device_spec``
+"""),
+]
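
For the ``hypervisor_version_weight_multiplier`` examples above, the weigher's contribution is the reported hypervisor version, min-max normalized across the candidate hosts and scaled by the multiplier, so a negative multiplier inverts the preference. A toy illustration with made-up version numbers:

.. code-block:: python

    def weigh_hosts(hv_versions, multiplier):
        """Toy min-max normalized weigher: a higher weight is preferred."""
        lo, hi = min(hv_versions.values()), max(hv_versions.values())
        span = (hi - lo) or 1
        return {host: multiplier * (ver - lo) / span
                for host, ver in hv_versions.items()}

    versions = {'oldhost': 6_002_000, 'newhost': 8_000_000}
    print(weigh_hosts(versions, 1.0))    # newhost wins (default behaviour)
    print(weigh_hosts(versions, -1000))  # oldhost wins (strongly prefer older)
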
metrics_group = cfg.OptGroup(
name="metrics",
diff --git a/nova/conf/spice.py b/nova/conf/spice.py
index 59ed4e80a0..e5854946f1 100644
--- a/nova/conf/spice.py
+++ b/nova/conf/spice.py
@@ -85,6 +85,59 @@ Agent. With the Spice agent installed the following features are enabled:
needing to click inside the console or press keys to release it. The
performance of mouse movement is also improved.
"""),
+ cfg.StrOpt('image_compression',
+ advanced=True,
+ choices=[
+ ('auto_glz', 'enable image compression mode to choose between glz '
+ 'and quic algorithm, based on image properties'),
+ ('auto_lz', 'enable image compression mode to choose between lz '
+ 'and quic algorithm, based on image properties'),
+ ('quic', 'enable image compression based on the SFALIC algorithm'),
+ ('glz', 'enable image compression using lz with history based '
+ 'global dictionary'),
+ ('lz', 'enable image compression with the Lempel-Ziv algorithm'),
+ ('off', 'disable image compression')
+ ],
+ help="""
+Configure the SPICE image compression (lossless).
+"""),
+ cfg.StrOpt('jpeg_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable JPEG image compression automatically'),
+ ('never', 'disable JPEG image compression'),
+ ('always', 'enable JPEG image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossy for slow links).
+"""),
+ cfg.StrOpt('zlib_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable zlib image compression automatically'),
+ ('never', 'disable zlib image compression'),
+ ('always', 'enable zlib image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossless for slow links).
+"""),
+ cfg.BoolOpt('playback_compression',
+ advanced=True,
+ help="""
+Enable the SPICE audio stream compression (using celt).
+"""),
+ cfg.StrOpt('streaming_mode',
+ advanced=True,
+ choices=[
+ ('filter', 'SPICE server adds additional filters to decide if '
+ 'video streaming should be activated'),
+ ('all', 'any fast-refreshing window can be encoded into a video '
+ 'stream'),
+ ('off', 'no video detection and (lossy) compression is performed')
+ ],
+ help="""
+Configure the SPICE video stream detection and (lossy) compression.
+"""),
cfg.URIOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help="""
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index 2ec53282cd..943ec74885 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -374,6 +374,28 @@ Related options:
* :oslo.config:option:`DEFAULT.compute_driver` (libvirt)
"""),
+ cfg.IntOpt('qemu_monitor_announce_self_count',
+ default=3,
+ min=1,
+ help="""
+The total number of times to send the announce_self command to the QEMU
+monitor when enable_qemu_monitor_announce_self is enabled.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
+ cfg.IntOpt('qemu_monitor_announce_self_interval',
+ default=1,
+ min=1,
+ help="""
+The number of seconds to wait before re-sending the announce_self
+command to the QEMU monitor.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
cfg.BoolOpt('disable_compute_service_check_for_ffu',
default=False,
help="""
@@ -410,6 +432,13 @@ with the destination host. When using QEMU >= 2.9 and libvirt >=
4.4.0, libvirt will do the correct thing with respect to checking CPU
compatibility on the destination host during live migration.
"""),
+ cfg.BoolOpt('skip_cpu_compare_at_startup',
+ default=False,
+ help="""
+This will skip the CPU comparison call at the startup of the Compute
+service and let libvirt handle it.
+"""),
+
cfg.BoolOpt(
'skip_hypervisor_version_check_on_lm',
default=False,
@@ -417,6 +446,21 @@ compatibility on the destination host during live migration.
When this is enabled, it will skip version-checking of hypervisors
during live migration.
"""),
+ cfg.BoolOpt(
+ 'skip_reserve_in_use_ironic_nodes',
+ default=False,
+ help="""
+This may be useful if you use the Ironic driver, but don't have
+automatic cleaning enabled in Ironic. Nova, by default, will mark
+Ironic nodes as reserved as soon as they are in use. When you free
+the Ironic node (by deleting the nova instance), it takes a while
+for Nova to un-reserve that Ironic node in placement. Usually this
+is a good idea, because it avoids placement providing an Ironic node
+as a valid candidate when it is still being cleaned.
+However, if you don't use automatic cleaning, it can cause an
+extra delay before an Ironic node is available for building a
+new Nova instance.
+"""),
]
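
The two ``qemu_monitor_announce_self_*`` options simply parameterize a retry loop around the existing announce-self workaround: send the command, wait the configured interval, and repeat up to the configured count. Roughly, with a stand-in for the monitor call:

.. code-block:: python

    import time

    def announce_self_with_retries(send_announce, count=3, interval=1):
        """Send announce_self `count` times, waiting `interval` seconds between."""
        for attempt in range(1, count + 1):
            send_announce()  # stand-in for the QEMU monitor command
            if attempt < count:
                time.sleep(interval)

    announce_self_with_retries(lambda: print("announce_self sent"))
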
diff --git a/nova/db/api/legacy_migrations/README b/nova/db/api/legacy_migrations/README
deleted file mode 100644
index 6218f8cac4..0000000000
--- a/nova/db/api/legacy_migrations/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-http://code.google.com/p/sqlalchemy-migrate/
diff --git a/nova/db/api/legacy_migrations/manage.py b/nova/db/api/legacy_migrations/manage.py
deleted file mode 100644
index 6c2b3842ba..0000000000
--- a/nova/db/api/legacy_migrations/manage.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.versioning.shell import main
-
-
-if __name__ == '__main__':
- main(debug='False', repository='.')
diff --git a/nova/db/api/legacy_migrations/migrate.cfg b/nova/db/api/legacy_migrations/migrate.cfg
deleted file mode 100644
index 3e2ccef016..0000000000
--- a/nova/db/api/legacy_migrations/migrate.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=nova_api
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
diff --git a/nova/db/api/legacy_migrations/versions/067_train.py b/nova/db/api/legacy_migrations/versions/067_train.py
deleted file mode 100644
index 6b82b17e4b..0000000000
--- a/nova/db/api/legacy_migrations/versions/067_train.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.changeset.constraint import ForeignKeyConstraint
-from migrate import UniqueConstraint
-import sqlalchemy as sa
-from sqlalchemy import dialects
-
-from nova.db import types
-from nova.objects import keypair
-
-
-def InetSmall():
- return sa.String(length=39).with_variant(
- dialects.postgresql.INET(), 'postgresql'
- )
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData()
- # NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
- # is sqlalchemy-migrate which requires this. We'll remove these migrations
- # when dropping SQLAlchemy < 2.x support
- meta.bind = migrate_engine
-
- cell_mappings = sa.Table('cell_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=255)),
- sa.Column('transport_url', sa.Text()),
- sa.Column('database_connection', sa.Text()),
- # NOTE(stephenfin): These were originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'disabled', sa.Boolean(create_constraint=False), default=False),
- UniqueConstraint('uuid', name='uniq_cell_mappings0uuid'),
- sa.Index('uuid_idx', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- host_mappings = sa.Table('host_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('cell_id', sa.Integer, nullable=False),
- sa.Column('host', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'host', name='uniq_host_mappings0host'),
- sa.Index('host_idx', 'host'),
- ForeignKeyConstraint(
- columns=['cell_id'], refcolumns=[cell_mappings.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_mappings = sa.Table('instance_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_uuid', sa.String(length=36), nullable=False),
- sa.Column('cell_id', sa.Integer, nullable=True),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- # NOTE(stephenfin): These were originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'queued_for_delete', sa.Boolean(create_constraint=False),
- default=False),
- sa.Column('user_id', sa.String(length=255), nullable=True),
- UniqueConstraint(
- 'instance_uuid', name='uniq_instance_mappings0instance_uuid'),
- sa.Index('instance_uuid_idx', 'instance_uuid'),
- sa.Index('project_id_idx', 'project_id'),
- sa.Index(
- 'instance_mappings_user_id_project_id_idx', 'user_id',
- 'project_id'),
- ForeignKeyConstraint(
- columns=['cell_id'], refcolumns=[cell_mappings.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- flavors = sa.Table('flavors', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('memory_mb', sa.Integer, nullable=False),
- sa.Column('vcpus', sa.Integer, nullable=False),
- sa.Column('swap', sa.Integer, nullable=False),
- sa.Column('vcpu_weight', sa.Integer),
- sa.Column('flavorid', sa.String(length=255), nullable=False),
- sa.Column('rxtx_factor', sa.Float),
- sa.Column('root_gb', sa.Integer),
- sa.Column('ephemeral_gb', sa.Integer),
- sa.Column('disabled', sa.Boolean),
- sa.Column('is_public', sa.Boolean),
- sa.Column('description', sa.Text()),
- UniqueConstraint('flavorid', name='uniq_flavors0flavorid'),
- UniqueConstraint('name', name='uniq_flavors0name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- flavor_extra_specs = sa.Table('flavor_extra_specs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('flavor_id', sa.Integer, nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255)),
- UniqueConstraint(
- 'flavor_id', 'key', name='uniq_flavor_extra_specs0flavor_id0key'),
- sa.Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
- ForeignKeyConstraint(columns=['flavor_id'], refcolumns=[flavors.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- flavor_projects = sa.Table('flavor_projects', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('flavor_id', sa.Integer, nullable=False),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'flavor_id', 'project_id',
- name='uniq_flavor_projects0flavor_id0project_id'),
- ForeignKeyConstraint(
- columns=['flavor_id'], refcolumns=[flavors.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- request_specs = sa.Table('request_specs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_uuid', sa.String(36), nullable=False),
- sa.Column('spec', types.MediumText(), nullable=False),
- UniqueConstraint(
- 'instance_uuid', name='uniq_request_specs0instance_uuid'),
- sa.Index('request_spec_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- build_requests = sa.Table('build_requests', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('request_spec_id', sa.Integer, nullable=True),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- sa.Column('user_id', sa.String(length=255), nullable=True),
- sa.Column('display_name', sa.String(length=255)),
- sa.Column('instance_metadata', sa.Text),
- sa.Column('progress', sa.Integer),
- sa.Column('vm_state', sa.String(length=255)),
- sa.Column('task_state', sa.String(length=255)),
- sa.Column('image_ref', sa.String(length=255)),
- sa.Column('access_ip_v4', InetSmall()),
- sa.Column('access_ip_v6', InetSmall()),
- sa.Column('info_cache', sa.Text),
- sa.Column('security_groups', sa.Text, nullable=True),
- sa.Column('config_drive', sa.Boolean, default=False, nullable=True),
- sa.Column('key_name', sa.String(length=255)),
- sa.Column(
- 'locked_by',
- sa.Enum('owner', 'admin', name='build_requests0locked_by')),
- sa.Column('instance_uuid', sa.String(length=36)),
- sa.Column('instance', types.MediumText()),
- sa.Column('block_device_mappings', types.MediumText()),
- sa.Column('tags', sa.Text()),
- UniqueConstraint(
- 'instance_uuid', name='uniq_build_requests0instance_uuid'),
- sa.Index('build_requests_project_id_idx', 'project_id'),
- sa.Index('build_requests_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- keypairs = sa.Table('key_pairs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(255), nullable=False),
- sa.Column('user_id', sa.String(255), nullable=False),
- sa.Column('fingerprint', sa.String(255)),
- sa.Column('public_key', sa.Text()),
- sa.Column(
- 'type',
- sa.Enum('ssh', 'x509', metadata=meta, name='keypair_types'),
- nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH),
- UniqueConstraint(
- 'user_id', 'name', name='uniq_key_pairs0user_id0name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- projects = sa.Table('projects', meta,
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('external_id', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- UniqueConstraint('external_id', name='uniq_projects0external_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- users = sa.Table('users', meta,
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('external_id', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- UniqueConstraint('external_id', name='uniq_users0external_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- resource_classes = sa.Table('resource_classes', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- UniqueConstraint('name', name='uniq_resource_classes0name'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- nameargs = {}
- if migrate_engine.name == 'mysql':
- nameargs['collation'] = 'utf8_bin'
-
- resource_providers = sa.Table(
- 'resource_providers', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(36), nullable=False),
- sa.Column('name', sa.Unicode(200, **nameargs), nullable=True),
- sa.Column('generation', sa.Integer, default=0),
- sa.Column('can_host', sa.Integer, default=0),
- sa.Column(
- 'root_provider_id', sa.Integer,
- sa.ForeignKey('resource_providers.id')),
- sa.Column(
- 'parent_provider_id', sa.Integer,
- sa.ForeignKey('resource_providers.id')),
- UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
- UniqueConstraint('name', name='uniq_resource_providers0name'),
- sa.Index('resource_providers_name_idx', 'name'),
- sa.Index('resource_providers_uuid_idx', 'uuid'),
- sa.Index(
- 'resource_providers_root_provider_id_idx', 'root_provider_id'),
- sa.Index(
- 'resource_providers_parent_provider_id_idx', 'parent_provider_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- inventories = sa.Table(
- 'inventories', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('total', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('min_unit', sa.Integer, nullable=False),
- sa.Column('max_unit', sa.Integer, nullable=False),
- sa.Column('step_size', sa.Integer, nullable=False),
- sa.Column('allocation_ratio', sa.Float, nullable=False),
- sa.Index(
- 'inventories_resource_provider_id_idx', 'resource_provider_id'),
- sa.Index(
- 'inventories_resource_provider_resource_class_idx',
- 'resource_provider_id', 'resource_class_id'),
- sa.Index(
- 'inventories_resource_class_id_idx', 'resource_class_id'),
- UniqueConstraint(
- 'resource_provider_id', 'resource_class_id',
- name='uniq_inventories0resource_provider_resource_class'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- traits = sa.Table(
- 'traits', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('name', sa.Unicode(255, **nameargs), nullable=False),
- UniqueConstraint('name', name='uniq_traits0name'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- allocations = sa.Table(
- 'allocations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('consumer_id', sa.String(36), nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('used', sa.Integer, nullable=False),
- sa.Index(
- 'allocations_resource_provider_class_used_idx',
- 'resource_provider_id', 'resource_class_id', 'used'),
- sa.Index(
- 'allocations_resource_class_id_idx', 'resource_class_id'),
- sa.Index('allocations_consumer_id_idx', 'consumer_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- consumers = sa.Table(
- 'consumers', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('project_id', sa.Integer, nullable=False),
- sa.Column('user_id', sa.Integer, nullable=False),
- sa.Column(
- 'generation', sa.Integer, default=0, server_default=sa.text('0'),
- nullable=False),
- sa.Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'),
- sa.Index(
- 'consumers_project_id_user_id_uuid_idx', 'project_id', 'user_id',
- 'uuid'),
- UniqueConstraint('uuid', name='uniq_consumers0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- resource_provider_aggregates = sa.Table(
- 'resource_provider_aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'resource_provider_id', sa.Integer, primary_key=True,
- nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, primary_key=True, nullable=False),
- sa.Index(
- 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- resource_provider_traits = sa.Table(
- 'resource_provider_traits', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'trait_id', sa.Integer, sa.ForeignKey('traits.id'),
- primary_key=True, nullable=False),
- sa.Column(
- 'resource_provider_id', sa.Integer, primary_key=True,
- nullable=False),
- sa.Index(
- 'resource_provider_traits_resource_provider_trait_idx',
- 'resource_provider_id', 'trait_id'),
- ForeignKeyConstraint(
- columns=['resource_provider_id'],
- refcolumns=[resource_providers.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- placement_aggregates = sa.Table('placement_aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), index=True),
- UniqueConstraint('uuid', name='uniq_placement_aggregates0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- aggregates = sa.Table('aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column('name', sa.String(length=255)),
- sa.Index('aggregate_uuid_idx', 'uuid'),
- UniqueConstraint('name', name='uniq_aggregate0name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_hosts = sa.Table('aggregate_hosts', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('host', sa.String(length=255)),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- UniqueConstraint(
- 'host', 'aggregate_id',
- name='uniq_aggregate_hosts0host0aggregate_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_metadata = sa.Table('aggregate_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'aggregate_id', 'key',
- name='uniq_aggregate_metadata0aggregate_id0key'),
- sa.Index('aggregate_metadata_key_idx', 'key'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- groups = sa.Table('instance_groups', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=255)),
- UniqueConstraint(
- 'uuid', name='uniq_instance_groups0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_policy = sa.Table('instance_group_policy', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('policy', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Column('rules', sa.Text),
- sa.Index('instance_group_policy_policy_idx', 'policy'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_member = sa.Table('instance_group_member', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_uuid', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Index('instance_group_member_instance_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- quota_classes = sa.Table('quota_classes', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('class_name', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('hard_limit', sa.Integer),
- sa.Index('quota_classes_class_name_idx', 'class_name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- quota_usages = sa.Table('quota_usages', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('in_use', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('until_refresh', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('quota_usages_project_id_idx', 'project_id'),
- sa.Index('quota_usages_user_id_idx', 'user_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- quotas = sa.Table('quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer),
- UniqueConstraint(
- 'project_id', 'resource', name='uniq_quotas0project_id0resource'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- project_user_quotas = sa.Table('project_user_quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('user_id', sa.String(length=255), nullable=False),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer, nullable=True),
- UniqueConstraint(
- 'user_id', 'project_id', 'resource',
- name='uniq_project_user_quotas0user_id0project_id0resource'),
- sa.Index(
- 'project_user_quotas_project_id_idx', 'project_id'),
- sa.Index(
- 'project_user_quotas_user_id_idx', 'user_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- reservations = sa.Table('reservations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column(
- 'usage_id', sa.Integer, sa.ForeignKey('quota_usages.id'),
- nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('delta', sa.Integer, nullable=False),
- sa.Column('expire', sa.DateTime),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('reservations_project_id_idx', 'project_id'),
- sa.Index('reservations_uuid_idx', 'uuid'),
- sa.Index('reservations_expire_idx', 'expire'),
- sa.Index('reservations_user_id_idx', 'user_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- tables = [
- cell_mappings,
- host_mappings,
- instance_mappings,
- flavors,
- flavor_extra_specs,
- flavor_projects,
- request_specs,
- build_requests,
- keypairs,
- projects,
- users,
- resource_classes,
- resource_providers,
- inventories,
- traits,
- allocations,
- consumers,
- resource_provider_aggregates,
- resource_provider_traits,
- placement_aggregates,
- aggregates,
- aggregate_hosts,
- aggregate_metadata,
- groups,
- group_policy,
- group_member,
- quota_classes,
- quota_usages,
- quotas,
- project_user_quotas,
- reservations,
- ]
- for table in tables:
- table.create(checkfirst=True)
diff --git a/nova/db/api/legacy_migrations/versions/068_placeholder.py b/nova/db/api/legacy_migrations/versions/068_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/068_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
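
These placeholder modules reserve migration numbers so that a change backported to a stable branch can claim a slot without colliding with new work. Purely as a hypothetical illustration (the table, column, and index names below are invented, not taken from Nova's history), a backport landing in one of these slots would have replaced the no-op upgrade() with a small schema change using the same autoload-and-create pattern seen elsewhere in this series:

    import sqlalchemy as sa


    def upgrade(migrate_engine):
        # Hypothetical backport: add an index to an existing table, reusing
        # the autoload-and-create pattern from the deleted migrations. The
        # index name (and the assumption that 'instances' has a 'uuid'
        # column worth indexing) are for illustration only.
        meta = sa.MetaData()
        instances = sa.Table('instances', meta, autoload_with=migrate_engine)
        idx = sa.Index('instances_example_backport_idx', instances.c.uuid)
        idx.create(migrate_engine)
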
diff --git a/nova/db/api/legacy_migrations/versions/069_placeholder.py b/nova/db/api/legacy_migrations/versions/069_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/069_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/070_placeholder.py b/nova/db/api/legacy_migrations/versions/070_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/070_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/071_placeholder.py b/nova/db/api/legacy_migrations/versions/071_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/071_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/072_placeholder.py b/nova/db/api/legacy_migrations/versions/072_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/072_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/073_placeholder.py b/nova/db/api/legacy_migrations/versions/073_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/073_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/074_placeholder.py b/nova/db/api/legacy_migrations/versions/074_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/074_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/075_placeholder.py b/nova/db/api/legacy_migrations/versions/075_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/075_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/076_placeholder.py b/nova/db/api/legacy_migrations/versions/076_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/076_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/077_placeholder.py b/nova/db/api/legacy_migrations/versions/077_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/077_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/078_placeholder.py b/nova/db/api/legacy_migrations/versions/078_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/078_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/079_placeholder.py b/nova/db/api/legacy_migrations/versions/079_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/079_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/080_placeholder.py b/nova/db/api/legacy_migrations/versions/080_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/080_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/081_placeholder.py b/nova/db/api/legacy_migrations/versions/081_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/081_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/082_placeholder.py b/nova/db/api/legacy_migrations/versions/082_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/082_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/083_placeholder.py b/nova/db/api/legacy_migrations/versions/083_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/083_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/084_placeholder.py b/nova/db/api/legacy_migrations/versions/084_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/084_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/085_placeholder.py b/nova/db/api/legacy_migrations/versions/085_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/085_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/086_placeholder.py b/nova/db/api/legacy_migrations/versions/086_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/086_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/087_placeholder.py b/nova/db/api/legacy_migrations/versions/087_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/087_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/README b/nova/db/main/legacy_migrations/README
deleted file mode 100644
index c5f51f2280..0000000000
--- a/nova/db/main/legacy_migrations/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://sqlalchemy-migrate.readthedocs.io/en/latest/
diff --git a/nova/db/main/legacy_migrations/__init__.py b/nova/db/main/legacy_migrations/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/db/main/legacy_migrations/__init__.py
+++ /dev/null
diff --git a/nova/db/main/legacy_migrations/manage.py b/nova/db/main/legacy_migrations/manage.py
deleted file mode 100644
index 6c2b3842ba..0000000000
--- a/nova/db/main/legacy_migrations/manage.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.versioning.shell import main
-
-
-if __name__ == '__main__':
- main(debug='False', repository='.')
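
The deleted manage.py simply hands control to sqlalchemy-migrate's command-line shell, with the containing directory as the repository. A rough sketch of the equivalent programmatic calls; the migrate.versioning.api function names are recalled from sqlalchemy-migrate and should be treated as an assumption, and the database URL and repository path are placeholders:

    # Assumed to mirror what the deleted manage.py exposed on the command line.
    from migrate.versioning import api as versioning_api

    db_url = 'sqlite:///nova.db'                    # placeholder URL
    repo = 'nova/db/main/legacy_migrations'         # placeholder repo path

    versioning_api.version_control(db_url, repo)    # stamp a fresh database
    print(versioning_api.db_version(db_url, repo))  # report current version
    versioning_api.upgrade(db_url, repo)            # apply pending migrations
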
diff --git a/nova/db/main/legacy_migrations/migrate.cfg b/nova/db/main/legacy_migrations/migrate.cfg
deleted file mode 100644
index 006e01e406..0000000000
--- a/nova/db/main/legacy_migrations/migrate.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=nova
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
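
Because the config above names migrate_version as the tracking table, the applied schema version could also be read directly with plain SQLAlchemy. A small sketch, assuming the (repository_id, version) column layout of sqlalchemy-migrate's tracking table and a placeholder database URL:

    import sqlalchemy as sa

    # Placeholder URL; the (repository_id, version) column layout of
    # migrate_version is an assumption about sqlalchemy-migrate's tracking
    # table, not something stated in this change.
    engine = sa.create_engine('sqlite:///nova.db')
    with engine.connect() as conn:
        row = conn.execute(
            sa.text('SELECT repository_id, version FROM migrate_version')
        ).first()
        print(row)
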
diff --git a/nova/db/main/legacy_migrations/versions/402_train.py b/nova/db/main/legacy_migrations/versions/402_train.py
deleted file mode 100644
index 5a39d87f8c..0000000000
--- a/nova/db/main/legacy_migrations/versions/402_train.py
+++ /dev/null
@@ -1,1619 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.changeset import UniqueConstraint
-from oslo_log import log as logging
-import sqlalchemy as sa
-from sqlalchemy import dialects
-from sqlalchemy.ext import compiler
-from sqlalchemy import types as sqla_types
-
-from nova.db import types
-from nova.objects import keypair
-
-LOG = logging.getLogger(__name__)
-
-
-def Inet():
- return sa.String(length=43).with_variant(
- dialects.postgresql.INET(), 'postgresql',
- )
-
-
-def InetSmall():
- return sa.String(length=39).with_variant(
- dialects.postgresql.INET(), 'postgresql',
- )
-
-
-# We explicitly name many of our foreign keys for MySQL so they match Havana
-@compiler.compiles(sa.ForeignKeyConstraint, 'postgresql')
-def process(element, compiler, **kw):
- element.name = None
- return compiler.visit_foreign_key_constraint(element, **kw)
-
-
-def _create_shadow_tables(migrate_engine):
- meta = sa.MetaData()
- meta.reflect(migrate_engine)
- table_names = list(meta.tables.keys())
-
- # NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
- # is sqlalchemy-migrate which requires this. We'll remove these migrations
- # when dropping SQLAlchemy < 2.x support
- meta.bind = migrate_engine
-
- for table_name in table_names:
- # Skip tables that are not soft-deletable
- if table_name in (
- 'tags',
- 'resource_providers',
- 'inventories',
- 'allocations',
- 'resource_provider_aggregates',
- 'console_auth_tokens',
- ):
- continue
-
- table = sa.Table(table_name, meta, autoload_with=migrate_engine)
-
- columns = []
- for column in table.columns:
- column_copy = None
-
- # NOTE(boris-42): BigInteger is not supported by sqlite, so after
- # copy it will have NullType. The other types that are used in Nova
- # are supported by sqlite
- if isinstance(column.type, sqla_types.NullType):
- column_copy = sa.Column(
- column.name, sa.BigInteger(), default=0,
- )
-
- if table_name == 'instances' and column.name == 'locked_by':
- enum = sa.Enum(
- 'owner', 'admin', name='shadow_instances0locked_by',
- )
- column_copy = sa.Column(column.name, enum)
-
- # TODO(stephenfin): Fix these various bugs in a follow-up
-
- # 244_increase_user_id_length_volume_usage_cache; this
- # alteration should apply to shadow tables also
-
- if table_name == 'volume_usage_cache' and column.name == 'user_id':
- # nullable should be True
- column_copy = sa.Column('user_id', sa.String(36))
-
- # 247_nullable_mismatch; these alterations should apply to shadow
- # tables also
-
- if table_name == 'quota_usages' and column.name == 'resources':
- # nullable should be False
- column_copy = sa.Column('resource', sa.String(length=255))
-
- if table_name == 'pci_devices':
- if column.name == 'deleted':
- # nullable should be True
- column_copy = sa.Column(
- 'deleted', sa.Integer, default=0, nullable=False,
- )
-
- if column.name == 'product_id':
- # nullable should be False
- column_copy = sa.Column('product_id', sa.String(4))
-
- if column.name == 'vendor_id':
- # nullable should be False
- column_copy = sa.Column('vendor_id', sa.String(4))
-
- if column.name == 'dev_type':
- # nullable should be False
- column_copy = sa.Column('dev_type', sa.String(8))
-
- # 280_add_nullable_false_to_keypairs_name; this should apply to the
- # shadow table also
-
- if table_name == 'key_pairs' and column.name == 'name':
- # nullable should be False
- column_copy = sa.Column('name', sa.String(length=255))
-
- # NOTE(stephenfin): By default, 'sqlalchemy.Enum' will issue a
- # 'CREATE TYPE' command on PostgreSQL, even if the type already
- # exists. We work around this by using the PostgreSQL-specific
- # 'sqlalchemy.dialects.postgresql.ENUM' type and setting
- # 'create_type' to 'False'. See [1] for more information.
- #
- # [1] https://stackoverflow.com/a/28894354/613428
- if migrate_engine.name == 'postgresql':
- if table_name == 'key_pairs' and column.name == 'type':
- enum = dialects.postgresql.ENUM(
- 'ssh', 'x509', name='keypair_types', create_type=False)
- column_copy = sa.Column(
- column.name, enum, nullable=False,
- server_default=keypair.KEYPAIR_TYPE_SSH)
- elif (
- table_name == 'migrations' and
- column.name == 'migration_type'
- ):
- enum = dialects.postgresql.ENUM(
- 'migration', 'resize', 'live-migration', 'evacuation',
- name='migration_type', create_type=False)
- column_copy = sa.Column(column.name, enum, nullable=True)
-
- if column_copy is None:
- # NOTE(stephenfin): Yes, this is private. Yes, this is what we
- # were told to use. Blame zzzeek!
- column_copy = column._copy()
-
- columns.append(column_copy)
-
- shadow_table = sa.Table(
- 'shadow_' + table_name, meta, *columns, mysql_engine='InnoDB',
- )
-
- try:
- shadow_table.create()
- except Exception:
- LOG.info(repr(shadow_table))
- LOG.exception('Exception while creating table.')
- raise
-
- # TODO(stephenfin): Fix these various bugs in a follow-up
-
- # 252_add_instance_extra_table; we don't create indexes for shadow tables
- # in general and these should be removed
-
- table = sa.Table(
- 'shadow_instance_extra', meta, autoload_with=migrate_engine,
- )
- idx = sa.Index('shadow_instance_extra_idx', table.c.instance_uuid)
- idx.create(migrate_engine)
-
-    # 373_migration_uuid; we shouldn't create indexes for shadow tables
-
- table = sa.Table('shadow_migrations', meta, autoload_with=migrate_engine)
- idx = sa.Index('shadow_migrations_uuid', table.c.uuid, unique=True)
- idx.create(migrate_engine)
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData()
- # NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
- # is sqlalchemy-migrate which requires this. We'll remove these migrations
- # when dropping SQLAlchemy < 2.x support
- meta.bind = migrate_engine
-
- agent_builds = sa.Table('agent_builds', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('hypervisor', sa.String(length=255)),
- sa.Column('os', sa.String(length=255)),
- sa.Column('architecture', sa.String(length=255)),
- sa.Column('version', sa.String(length=255)),
- sa.Column('url', sa.String(length=255)),
- sa.Column('md5hash', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'agent_builds_hypervisor_os_arch_idx',
- 'hypervisor', 'os', 'architecture'),
- UniqueConstraint(
- 'hypervisor', 'os', 'architecture', 'deleted',
- name='uniq_agent_builds0hypervisor0os0architecture0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_hosts = sa.Table('aggregate_hosts', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('host', sa.String(length=255)),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'host', 'aggregate_id', 'deleted',
- name='uniq_aggregate_hosts0host0aggregate_id0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_metadata = sa.Table('aggregate_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=False),
- sa.Column('deleted', sa.Integer),
- sa.Index('aggregate_metadata_key_idx', 'key'),
- sa.Index('aggregate_metadata_value_idx', 'value'),
- UniqueConstraint(
- 'aggregate_id', 'key', 'deleted',
- name='uniq_aggregate_metadata0aggregate_id0key0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregates = sa.Table('aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column('uuid', sa.String(36)),
- sa.Index('aggregate_uuid_idx', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- allocations = sa.Table('allocations', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('consumer_id', sa.String(36), nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('used', sa.Integer, nullable=False),
- sa.Index(
- 'allocations_resource_provider_class_used_idx',
- 'resource_provider_id', 'resource_class_id', 'used'),
- sa.Index('allocations_consumer_id_idx', 'consumer_id'),
- sa.Index('allocations_resource_class_id_idx', 'resource_class_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- block_device_mapping = sa.Table('block_device_mapping', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('device_name', sa.String(length=255), nullable=True),
- sa.Column('delete_on_termination', sa.Boolean),
- sa.Column('snapshot_id', sa.String(length=36), nullable=True),
- sa.Column('volume_id', sa.String(length=36), nullable=True),
- sa.Column('volume_size', sa.Integer),
- sa.Column('no_device', sa.Boolean),
- sa.Column('connection_info', types.MediumText()),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='block_device_mapping_instance_uuid_fkey')),
- sa.Column('deleted', sa.Integer),
- sa.Column('source_type', sa.String(length=255), nullable=True),
- sa.Column('destination_type', sa.String(length=255), nullable=True),
- sa.Column('guest_format', sa.String(length=255), nullable=True),
- sa.Column('device_type', sa.String(length=255), nullable=True),
- sa.Column('disk_bus', sa.String(length=255), nullable=True),
- sa.Column('boot_index', sa.Integer),
- sa.Column('image_id', sa.String(length=36), nullable=True),
- sa.Column('tag', sa.String(255)),
- sa.Column('attachment_id', sa.String(36), nullable=True),
- sa.Column('uuid', sa.String(36), nullable=True),
- sa.Column('volume_type', sa.String(255), nullable=True),
- sa.Index('snapshot_id', 'snapshot_id'),
- sa.Index('volume_id', 'volume_id'),
- sa.Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
- sa.Index(
- 'block_device_mapping_instance_uuid_device_name_idx',
- 'instance_uuid', 'device_name'),
- sa.Index(
- 'block_device_mapping_instance_uuid_volume_id_idx',
- 'instance_uuid', 'volume_id'),
- UniqueConstraint('uuid', name='uniq_block_device_mapping0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- bw_usage_cache = sa.Table('bw_usage_cache', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('start_period', sa.DateTime, nullable=False),
- sa.Column('last_refreshed', sa.DateTime),
- sa.Column('bw_in', sa.BigInteger),
- sa.Column('bw_out', sa.BigInteger),
- sa.Column('mac', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column('last_ctr_in', sa.BigInteger()),
- sa.Column('last_ctr_out', sa.BigInteger()),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'bw_usage_cache_uuid_start_period_idx',
- 'uuid', 'start_period'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- cells = sa.Table('cells', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('api_url', sa.String(length=255)),
- sa.Column('weight_offset', sa.Float),
- sa.Column('weight_scale', sa.Float),
- sa.Column('name', sa.String(length=255)),
- sa.Column('is_parent', sa.Boolean),
- sa.Column('deleted', sa.Integer),
- sa.Column('transport_url', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'name', 'deleted',
- name='uniq_cells0name0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- certificates = sa.Table('certificates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('file_name', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'certificates_project_id_deleted_idx',
- 'project_id', 'deleted'),
- sa.Index('certificates_user_id_deleted_idx', 'user_id', 'deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- compute_nodes = sa.Table('compute_nodes', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('service_id', sa.Integer, nullable=True),
- sa.Column('vcpus', sa.Integer, nullable=False),
- sa.Column('memory_mb', sa.Integer, nullable=False),
- sa.Column('local_gb', sa.Integer, nullable=False),
- sa.Column('vcpus_used', sa.Integer, nullable=False),
- sa.Column('memory_mb_used', sa.Integer, nullable=False),
- sa.Column('local_gb_used', sa.Integer, nullable=False),
- sa.Column('hypervisor_type', types.MediumText(), nullable=False),
- sa.Column('hypervisor_version', sa.Integer, nullable=False),
- sa.Column('cpu_info', types.MediumText(), nullable=False),
- sa.Column('disk_available_least', sa.Integer),
- sa.Column('free_ram_mb', sa.Integer),
- sa.Column('free_disk_gb', sa.Integer),
- sa.Column('current_workload', sa.Integer),
- sa.Column('running_vms', sa.Integer),
- sa.Column('hypervisor_hostname', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column('host_ip', InetSmall()),
- sa.Column('supported_instances', sa.Text),
- sa.Column('pci_stats', sa.Text, nullable=True),
- sa.Column('metrics', sa.Text, nullable=True),
- sa.Column('extra_resources', sa.Text, nullable=True),
- sa.Column('stats', sa.Text, default='{}'),
- sa.Column('numa_topology', sa.Text, nullable=True),
- sa.Column('host', sa.String(255), nullable=True),
- sa.Column('ram_allocation_ratio', sa.Float, nullable=True),
- sa.Column('cpu_allocation_ratio', sa.Float, nullable=True),
- sa.Column('uuid', sa.String(36), nullable=True),
- sa.Column('disk_allocation_ratio', sa.Float, nullable=True),
- sa.Column('mapped', sa.Integer, default=0, nullable=True),
- sa.Index('compute_nodes_uuid_idx', 'uuid', unique=True),
- UniqueConstraint(
- 'host', 'hypervisor_hostname', 'deleted',
- name='uniq_compute_nodes0host0hypervisor_hostname0deleted',
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- console_auth_tokens = sa.Table('console_auth_tokens', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('token_hash', sa.String(255), nullable=False),
- sa.Column('console_type', sa.String(255), nullable=False),
- sa.Column('host', sa.String(255), nullable=False),
- sa.Column('port', sa.Integer, nullable=False),
- sa.Column('internal_access_path', sa.String(255)),
- sa.Column('instance_uuid', sa.String(36), nullable=False),
- sa.Column('expires', sa.Integer, nullable=False),
- sa.Column('access_url_base', sa.String(255), nullable=True),
- sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
- sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
- sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
- sa.Index(
- 'console_auth_tokens_token_hash_instance_uuid_idx',
- 'token_hash', 'instance_uuid'),
- UniqueConstraint(
- 'token_hash', name='uniq_console_auth_tokens0token_hash'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- console_pools = sa.Table('console_pools', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', InetSmall()),
- sa.Column('username', sa.String(length=255)),
- sa.Column('password', sa.String(length=255)),
- sa.Column('console_type', sa.String(length=255)),
- sa.Column('public_hostname', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('compute_host', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'host', 'console_type', 'compute_host', 'deleted',
- name='uniq_console_pools0host0console_type0compute_host0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- consoles = sa.Table('consoles', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_name', sa.String(length=255)),
- sa.Column('password', sa.String(length=255)),
- sa.Column('port', sa.Integer),
- sa.Column('pool_id', sa.Integer, sa.ForeignKey('console_pools.id')),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='consoles_instance_uuid_fkey')),
- sa.Column('deleted', sa.Integer),
- sa.Index('consoles_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- dns_domains = sa.Table('dns_domains', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Boolean),
- sa.Column(
- 'domain', sa.String(length=255), primary_key=True, nullable=False),
- sa.Column('scope', sa.String(length=255)),
- sa.Column('availability_zone', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
- sa.Index('dns_domains_project_id_idx', 'project_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- fixed_ips = sa.Table('fixed_ips', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', InetSmall()),
- sa.Column('network_id', sa.Integer),
- sa.Column('allocated', sa.Boolean),
- sa.Column('leased', sa.Boolean),
- sa.Column('reserved', sa.Boolean),
- sa.Column('virtual_interface_id', sa.Integer),
- sa.Column('host', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fixed_ips_instance_uuid_fkey'),
- ),
- sa.Column('deleted', sa.Integer),
- sa.Index('network_id', 'network_id'),
- sa.Index('address', 'address'),
- sa.Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
- sa.Index(
- 'fixed_ips_virtual_interface_id_fkey',
- 'virtual_interface_id'),
- sa.Index('fixed_ips_host_idx', 'host'),
- sa.Index(
- 'fixed_ips_network_id_host_deleted_idx', 'network_id',
- 'host', 'deleted'),
- sa.Index(
- 'fixed_ips_address_reserved_network_id_deleted_idx',
- 'address', 'reserved',
- 'network_id', 'deleted'),
- sa.Index(
- 'fixed_ips_deleted_allocated_idx',
- 'address', 'deleted', 'allocated'),
- sa.Index(
- 'fixed_ips_deleted_allocated_updated_at_idx',
- 'deleted', 'allocated', 'updated_at'),
- UniqueConstraint(
- 'address', 'deleted',
- name='uniq_fixed_ips0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- floating_ips = sa.Table('floating_ips', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', InetSmall()),
- sa.Column('fixed_ip_id', sa.Integer),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('auto_assigned', sa.Boolean),
- sa.Column('pool', sa.String(length=255)),
- sa.Column('interface', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('fixed_ip_id', 'fixed_ip_id'),
- sa.Index('floating_ips_host_idx', 'host'),
- sa.Index('floating_ips_project_id_idx', 'project_id'),
- sa.Index(
- 'floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
- 'pool', 'deleted', 'fixed_ip_id', 'project_id'),
- UniqueConstraint(
- 'address', 'deleted',
- name='uniq_floating_ips0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_faults = sa.Table('instance_faults', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fk_instance_faults_instance_uuid')),
- sa.Column('code', sa.Integer, nullable=False),
- sa.Column('message', sa.String(length=255)),
- sa.Column('details', types.MediumText()),
- sa.Column('host', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_faults_host_idx', 'host'),
- sa.Index(
- 'instance_faults_instance_uuid_deleted_created_at_idx',
- 'instance_uuid', 'deleted', 'created_at'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_id_mappings = sa.Table('instance_id_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(36), nullable=False),
- sa.Column('deleted', sa.Integer),
- sa.Index('ix_instance_id_mappings_uuid', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_info_caches = sa.Table('instance_info_caches', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('network_info', types.MediumText()),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='instance_info_caches_instance_uuid_fkey'),
- nullable=False),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'instance_uuid',
- name='uniq_instance_info_caches0instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- groups = sa.Table('instance_groups', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=255)),
- UniqueConstraint(
- 'uuid', 'deleted',
- name='uniq_instance_groups0uuid0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_policy = sa.Table('instance_group_policy', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('policy', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Index('instance_group_policy_policy_idx', 'policy'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_member = sa.Table('instance_group_member', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_id', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Index(
- 'instance_group_member_instance_idx',
- 'instance_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- instance_metadata = sa.Table('instance_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('key', sa.String(length=255)),
- sa.Column('value', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='instance_metadata_instance_uuid_fkey'),
- nullable=True),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_system_metadata = sa.Table('instance_system_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='instance_system_metadata_ibfk_1'),
- nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_uuid', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # TODO(stephenfin): Remove this table since it has been moved to the API DB
- instance_type_extra_specs = sa.Table('instance_type_extra_specs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_type_id', sa.Integer, sa.ForeignKey('instance_types.id'),
- nullable=False),
- sa.Column('key', sa.String(length=255)),
- sa.Column('value', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'instance_type_extra_specs_instance_type_id_key_idx',
- 'instance_type_id', 'key'),
- UniqueConstraint(
- 'instance_type_id', 'key', 'deleted',
- name='uniq_instance_type_extra_specs0instance_type_id0key0deleted'
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # TODO(stephenfin): Remove this table since it has been moved to the API DB
- instance_type_projects = sa.Table('instance_type_projects', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_type_id', sa.Integer,
- sa.ForeignKey(
- 'instance_types.id', name='instance_type_projects_ibfk_1'),
- nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'instance_type_id', 'project_id', 'deleted',
- name='uniq_instance_type_projects0instance_type_id0project_id'
- '0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # TODO(stephenfin): Remove this table since it has been moved to the API DB
- instance_types = sa.Table('instance_types', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('name', sa.String(length=255)),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('memory_mb', sa.Integer, nullable=False),
- sa.Column('vcpus', sa.Integer, nullable=False),
- sa.Column('swap', sa.Integer, nullable=False),
- sa.Column('vcpu_weight', sa.Integer),
- sa.Column('flavorid', sa.String(length=255)),
- sa.Column('rxtx_factor', sa.Float),
- sa.Column('root_gb', sa.Integer),
- sa.Column('ephemeral_gb', sa.Integer),
- sa.Column('disabled', sa.Boolean),
- sa.Column('is_public', sa.Boolean),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'name', 'deleted',
- name='uniq_instance_types0name0deleted'),
- UniqueConstraint(
- 'flavorid', 'deleted',
- name='uniq_instance_types0flavorid0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instances = sa.Table('instances', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('internal_id', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('image_ref', sa.String(length=255)),
- sa.Column('kernel_id', sa.String(length=255)),
- sa.Column('ramdisk_id', sa.String(length=255)),
- sa.Column('launch_index', sa.Integer),
- sa.Column('key_name', sa.String(length=255)),
- sa.Column('key_data', types.MediumText()),
- sa.Column('power_state', sa.Integer),
- sa.Column('vm_state', sa.String(length=255)),
- sa.Column('memory_mb', sa.Integer),
- sa.Column('vcpus', sa.Integer),
- sa.Column('hostname', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('user_data', types.MediumText()),
- sa.Column('reservation_id', sa.String(length=255)),
- sa.Column('launched_at', sa.DateTime),
- sa.Column('terminated_at', sa.DateTime),
- sa.Column('display_name', sa.String(length=255)),
- sa.Column('display_description', sa.String(length=255)),
- sa.Column('availability_zone', sa.String(length=255)),
- sa.Column('locked', sa.Boolean),
- sa.Column('os_type', sa.String(length=255)),
- sa.Column('launched_on', types.MediumText()),
- sa.Column('instance_type_id', sa.Integer),
- sa.Column('vm_mode', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('architecture', sa.String(length=255)),
- sa.Column('root_device_name', sa.String(length=255)),
- sa.Column('access_ip_v4', InetSmall()),
- sa.Column('access_ip_v6', InetSmall()),
- sa.Column('config_drive', sa.String(length=255)),
- sa.Column('task_state', sa.String(length=255)),
- sa.Column('default_ephemeral_device', sa.String(length=255)),
- sa.Column('default_swap_device', sa.String(length=255)),
- sa.Column('progress', sa.Integer),
- sa.Column('auto_disk_config', sa.Boolean),
- sa.Column('shutdown_terminate', sa.Boolean),
- sa.Column('disable_terminate', sa.Boolean),
- sa.Column('root_gb', sa.Integer),
- sa.Column('ephemeral_gb', sa.Integer),
- sa.Column('cell_name', sa.String(length=255)),
- sa.Column('node', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column(
- 'locked_by',
- sa.Enum('owner', 'admin', name='instances0locked_by')),
- sa.Column('cleaned', sa.Integer, default=0),
- sa.Column('ephemeral_key_uuid', sa.String(36)),
- # NOTE(danms): This column originally included default=False. We
- # discovered in bug #1862205 that this will attempt to rewrite
- # the entire instances table with that value, which can time out
- # for large data sets (and does not even abort).
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column('hidden', sa.Boolean(create_constraint=False)),
- sa.Index('uuid', 'uuid', unique=True),
- sa.Index('instances_reservation_id_idx', 'reservation_id'),
- sa.Index(
- 'instances_terminated_at_launched_at_idx',
- 'terminated_at', 'launched_at'),
- sa.Index(
- 'instances_task_state_updated_at_idx',
- 'task_state', 'updated_at'),
- sa.Index('instances_uuid_deleted_idx', 'uuid', 'deleted'),
- sa.Index('instances_host_node_deleted_idx', 'host', 'node', 'deleted'),
- sa.Index(
- 'instances_host_deleted_cleaned_idx',
- 'host', 'deleted', 'cleaned'),
- sa.Index('instances_project_id_deleted_idx', 'project_id', 'deleted'),
- sa.Index('instances_deleted_created_at_idx', 'deleted', 'created_at'),
- sa.Index('instances_project_id_idx', 'project_id'),
- sa.Index(
- 'instances_updated_at_project_id_idx',
- 'updated_at', 'project_id'),
- UniqueConstraint('uuid', name='uniq_instances0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_actions = sa.Table('instance_actions', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('action', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fk_instance_actions_instance_uuid')),
- sa.Column('request_id', sa.String(length=255)),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('start_time', sa.DateTime),
- sa.Column('finish_time', sa.DateTime),
- sa.Column('message', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_uuid_idx', 'instance_uuid'),
- sa.Index('request_id_idx', 'request_id'),
- sa.Index(
- 'instance_actions_instance_uuid_updated_at_idx',
- 'instance_uuid', 'updated_at'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- instance_actions_events = sa.Table('instance_actions_events', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('event', sa.String(length=255)),
- sa.Column(
- 'action_id', sa.Integer, sa.ForeignKey('instance_actions.id')),
- sa.Column('start_time', sa.DateTime),
- sa.Column('finish_time', sa.DateTime),
- sa.Column('result', sa.String(length=255)),
- sa.Column('traceback', sa.Text),
- sa.Column('deleted', sa.Integer),
- sa.Column('host', sa.String(255)),
- sa.Column('details', sa.Text),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- instance_extra = sa.Table('instance_extra', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='instance_extra_instance_uuid_fkey'),
- nullable=False),
- sa.Column('numa_topology', sa.Text, nullable=True),
- sa.Column('pci_requests', sa.Text, nullable=True),
- sa.Column('flavor', sa.Text, nullable=True),
- sa.Column('vcpu_model', sa.Text, nullable=True),
- sa.Column('migration_context', sa.Text, nullable=True),
- sa.Column('keypairs', sa.Text, nullable=True),
- sa.Column('device_metadata', sa.Text, nullable=True),
- sa.Column('trusted_certs', sa.Text, nullable=True),
- sa.Column('vpmems', sa.Text, nullable=True),
- sa.Column('resources', sa.Text, nullable=True),
- sa.Index('instance_extra_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- inventories = sa.Table('inventories', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('total', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('min_unit', sa.Integer, nullable=False),
- sa.Column('max_unit', sa.Integer, nullable=False),
- sa.Column('step_size', sa.Integer, nullable=False),
- sa.Column('allocation_ratio', sa.Float, nullable=False),
- sa.Index(
- 'inventories_resource_provider_id_idx', 'resource_provider_id'),
- sa.Index(
- 'inventories_resource_class_id_idx', 'resource_class_id'),
- sa.Index(
- 'inventories_resource_provider_resource_class_idx',
- 'resource_provider_id', 'resource_class_id'),
- UniqueConstraint(
- 'resource_provider_id', 'resource_class_id',
- name='uniq_inventories0resource_provider_resource_class'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- key_pairs = sa.Table('key_pairs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('fingerprint', sa.String(length=255)),
- sa.Column('public_key', types.MediumText()),
- sa.Column('deleted', sa.Integer),
- sa.Column(
- 'type', sa.Enum('ssh', 'x509', name='keypair_types'),
- nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH),
- UniqueConstraint(
- 'user_id', 'name', 'deleted',
- name='uniq_key_pairs0user_id0name0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- migrations = sa.Table('migrations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('source_compute', sa.String(length=255)),
- sa.Column('dest_compute', sa.String(length=255)),
- sa.Column('dest_host', sa.String(length=255)),
- sa.Column('status', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fk_migrations_instance_uuid')),
- sa.Column('old_instance_type_id', sa.Integer),
- sa.Column('new_instance_type_id', sa.Integer),
- sa.Column('source_node', sa.String(length=255)),
- sa.Column('dest_node', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column(
- 'migration_type',
- sa.Enum(
- 'migration', 'resize', 'live-migration', 'evacuation',
- name='migration_type'),
- nullable=True),
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'hidden', sa.Boolean(create_constraint=False), default=False),
- sa.Column('memory_total', sa.BigInteger, nullable=True),
- sa.Column('memory_processed', sa.BigInteger, nullable=True),
- sa.Column('memory_remaining', sa.BigInteger, nullable=True),
- sa.Column('disk_total', sa.BigInteger, nullable=True),
- sa.Column('disk_processed', sa.BigInteger, nullable=True),
- sa.Column('disk_remaining', sa.BigInteger, nullable=True),
- sa.Column('uuid', sa.String(36)),
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'cross_cell_move', sa.Boolean(create_constraint=False),
- default=False),
- sa.Column('user_id', sa.String(255), nullable=True),
- sa.Column('project_id', sa.String(255), nullable=True),
- sa.Index('migrations_uuid', 'uuid', unique=True),
- sa.Index(
- 'migrations_instance_uuid_and_status_idx',
- 'deleted', 'instance_uuid', 'status'),
- sa.Index('migrations_updated_at_idx', 'updated_at'),
- # mysql-specific index by leftmost 100 chars. (mysql gets angry if the
- # index key length is too long.)
- sa.Index(
- 'migrations_by_host_nodes_and_status_idx',
- 'deleted', 'source_compute', 'dest_compute', 'source_node',
- 'dest_node', 'status',
- mysql_length={
- 'source_compute': 100,
- 'dest_compute': 100,
- 'source_node': 100,
- 'dest_node': 100,
- }),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- networks = sa.Table('networks', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('injected', sa.Boolean),
- sa.Column('cidr', Inet()),
- sa.Column('netmask', InetSmall()),
- sa.Column('bridge', sa.String(length=255)),
- sa.Column('gateway', InetSmall()),
- sa.Column('broadcast', InetSmall()),
- sa.Column('dns1', InetSmall()),
- sa.Column('vlan', sa.Integer),
- sa.Column('vpn_public_address', InetSmall()),
- sa.Column('vpn_public_port', sa.Integer),
- sa.Column('vpn_private_address', InetSmall()),
- sa.Column('dhcp_start', InetSmall()),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('cidr_v6', Inet()),
- sa.Column('gateway_v6', InetSmall()),
- sa.Column('label', sa.String(length=255)),
- sa.Column('netmask_v6', InetSmall()),
- sa.Column('bridge_interface', sa.String(length=255)),
- sa.Column('multi_host', sa.Boolean),
- sa.Column('dns2', InetSmall()),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column('priority', sa.Integer),
- sa.Column('rxtx_base', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Column('mtu', sa.Integer),
- sa.Column('dhcp_server', types.IPAddress),
- # NOTE(stephenfin): These were originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'enable_dhcp', sa.Boolean(create_constraint=False), default=True),
- sa.Column(
- 'share_address', sa.Boolean(create_constraint=False),
- default=False),
- sa.Index('networks_host_idx', 'host'),
- sa.Index('networks_cidr_v6_idx', 'cidr_v6'),
- sa.Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
- sa.Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
- sa.Index(
- 'networks_uuid_project_id_deleted_idx',
- 'uuid', 'project_id', 'deleted'),
- sa.Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
- UniqueConstraint('vlan', 'deleted', name='uniq_networks0vlan0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- pci_devices = sa.Table('pci_devices', meta,
- sa.Column('created_at', sa.DateTime(timezone=False)),
- sa.Column('updated_at', sa.DateTime(timezone=False)),
- sa.Column('deleted_at', sa.DateTime(timezone=False)),
- sa.Column('deleted', sa.Integer, default=0, nullable=True),
- sa.Column('id', sa.Integer, primary_key=True),
- sa.Column(
- 'compute_node_id', sa.Integer,
- sa.ForeignKey(
- 'compute_nodes.id', name='pci_devices_compute_node_id_fkey'),
- nullable=False),
- sa.Column('address', sa.String(12), nullable=False),
- sa.Column('product_id', sa.String(4), nullable=False),
- sa.Column('vendor_id', sa.String(4), nullable=False),
- sa.Column('dev_type', sa.String(8), nullable=False),
- sa.Column('dev_id', sa.String(255)),
- sa.Column('label', sa.String(255), nullable=False),
- sa.Column('status', sa.String(36), nullable=False),
- sa.Column('extra_info', sa.Text, nullable=True),
- sa.Column('instance_uuid', sa.String(36), nullable=True),
- sa.Column('request_id', sa.String(36), nullable=True),
- sa.Column('numa_node', sa.Integer, default=None),
- sa.Column('parent_addr', sa.String(12), nullable=True),
- sa.Column('uuid', sa.String(36)),
- sa.Index(
- 'ix_pci_devices_instance_uuid_deleted',
- 'instance_uuid', 'deleted'),
- sa.Index(
- 'ix_pci_devices_compute_node_id_deleted',
- 'compute_node_id', 'deleted'),
- sa.Index(
- 'ix_pci_devices_compute_node_id_parent_addr_deleted',
- 'compute_node_id', 'parent_addr', 'deleted'),
- UniqueConstraint(
- 'compute_node_id', 'address', 'deleted',
- name='uniq_pci_devices0compute_node_id0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- provider_fw_rules = sa.Table('provider_fw_rules', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('protocol', sa.String(length=5)),
- sa.Column('from_port', sa.Integer),
- sa.Column('to_port', sa.Integer),
- sa.Column('cidr', Inet()),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- quota_classes = sa.Table('quota_classes', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('class_name', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('hard_limit', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Index('ix_quota_classes_class_name', 'class_name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- quota_usages = sa.Table('quota_usages', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('in_use', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('until_refresh', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('ix_quota_usages_project_id', 'project_id'),
- sa.Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- quotas = sa.Table('quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'project_id', 'resource', 'deleted',
- name='uniq_quotas0project_id0resource0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- project_user_quotas = sa.Table('project_user_quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('user_id', sa.String(length=255), nullable=False),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer, nullable=True),
- sa.Index(
- 'project_user_quotas_project_id_deleted_idx',
- 'project_id', 'deleted'),
- sa.Index(
- 'project_user_quotas_user_id_deleted_idx',
- 'user_id', 'deleted'),
- UniqueConstraint(
- 'user_id', 'project_id', 'resource', 'deleted',
- name='uniq_project_user_quotas0user_id0project_id0resource0'
- 'deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- reservations = sa.Table('reservations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column(
- 'usage_id', sa.Integer,
- sa.ForeignKey('quota_usages.id', name='reservations_ibfk_1'),
- nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('delta', sa.Integer, nullable=False),
- sa.Column('expire', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('ix_reservations_project_id', 'project_id'),
- sa.Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
- sa.Index('reservations_uuid_idx', 'uuid'),
- sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- resource_providers = sa.Table('resource_providers', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(36), nullable=False),
- sa.Column('name', sa.Unicode(200), nullable=True),
- sa.Column('generation', sa.Integer, default=0),
- sa.Column('can_host', sa.Integer, default=0),
- UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
- UniqueConstraint('name', name='uniq_resource_providers0name'),
- sa.Index('resource_providers_name_idx', 'name'),
- sa.Index('resource_providers_uuid_idx', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- resource_provider_aggregates = sa.Table(
- 'resource_provider_aggregates', meta,
- sa.Column(
- 'resource_provider_id', sa.Integer, primary_key=True,
- nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, primary_key=True, nullable=False),
- sa.Index(
- 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- s3_images = sa.Table('s3_images', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_group_instance_association = sa.Table(
- 'security_group_instance_association', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'security_group_id', sa.Integer,
- sa.ForeignKey(
- 'security_groups.id',
- name='security_group_instance_association_ibfk_1'),
- ),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='security_group_instance_association_instance_uuid_fkey'),
- ),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'security_group_instance_association_instance_uuid_idx',
- 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_group_rules = sa.Table('security_group_rules', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'parent_group_id', sa.Integer,
- sa.ForeignKey('security_groups.id')),
- sa.Column('protocol', sa.String(length=255)),
- sa.Column('from_port', sa.Integer),
- sa.Column('to_port', sa.Integer),
- sa.Column('cidr', Inet()),
- sa.Column('group_id', sa.Integer, sa.ForeignKey('security_groups.id')),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_groups = sa.Table('security_groups', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255)),
- sa.Column('description', sa.String(length=255)),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'project_id', 'name', 'deleted',
- name='uniq_security_groups0project_id0name0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_group_default_rules = sa.Table(
- 'security_group_default_rules', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer, default=0),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('protocol', sa.String(length=5)),
- sa.Column('from_port', sa.Integer),
- sa.Column('to_port', sa.Integer),
- sa.Column('cidr', Inet()),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- services = sa.Table('services', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('host', sa.String(length=255)),
- sa.Column('binary', sa.String(length=255)),
- sa.Column('topic', sa.String(length=255)),
- sa.Column('report_count', sa.Integer, nullable=False),
- sa.Column('disabled', sa.Boolean),
- sa.Column('deleted', sa.Integer),
- sa.Column('disabled_reason', sa.String(length=255)),
- sa.Column('last_seen_up', sa.DateTime, nullable=True),
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'forced_down', sa.Boolean(create_constraint=False), default=False),
- sa.Column('version', sa.Integer, default=0),
- sa.Column('uuid', sa.String(36), nullable=True),
- sa.Index('services_uuid_idx', 'uuid', unique=True),
- UniqueConstraint(
- 'host', 'topic', 'deleted',
- name='uniq_services0host0topic0deleted'),
- UniqueConstraint(
- 'host', 'binary', 'deleted',
- name='uniq_services0host0binary0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- snapshot_id_mappings = sa.Table('snapshot_id_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- snapshots = sa.Table('snapshots', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column(
- 'id', sa.String(length=36), primary_key=True, nullable=False),
- sa.Column('volume_id', sa.String(length=36), nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('status', sa.String(length=255)),
- sa.Column('progress', sa.String(length=255)),
- sa.Column('volume_size', sa.Integer),
- sa.Column('scheduled_at', sa.DateTime),
- sa.Column('display_name', sa.String(length=255)),
- sa.Column('display_description', sa.String(length=255)),
- sa.Column('deleted', sa.String(length=36)),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- tags = sa.Table('tags', meta,
- sa.Column(
- 'resource_id', sa.String(36), primary_key=True, nullable=False),
- sa.Column('tag', sa.Unicode(80), primary_key=True, nullable=False),
- sa.Index('tags_tag_idx', 'tag'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- task_log = sa.Table('task_log', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('task_name', sa.String(length=255), nullable=False),
- sa.Column('state', sa.String(length=255), nullable=False),
- sa.Column('host', sa.String(length=255), nullable=False),
- sa.Column('period_beginning', sa.DateTime, nullable=False),
- sa.Column('period_ending', sa.DateTime, nullable=False),
- sa.Column('message', sa.String(length=255), nullable=False),
- sa.Column('task_items', sa.Integer),
- sa.Column('errors', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Index('ix_task_log_period_beginning', 'period_beginning'),
- sa.Index('ix_task_log_host', 'host'),
- sa.Index('ix_task_log_period_ending', 'period_ending'),
- UniqueConstraint(
- 'task_name', 'host', 'period_beginning', 'period_ending',
- name='uniq_task_log0task_name0host0period_beginning0period_ending',
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- virtual_interfaces = sa.Table('virtual_interfaces', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', sa.String(length=255)),
- sa.Column('network_id', sa.Integer),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='virtual_interfaces_instance_uuid_fkey'),
- nullable=True),
- sa.Column('deleted', sa.Integer),
- sa.Column('tag', sa.String(255)),
- sa.Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
- sa.Index('virtual_interfaces_network_id_idx', 'network_id'),
- sa.Index('virtual_interfaces_uuid_idx', 'uuid'),
- UniqueConstraint(
- 'address', 'deleted',
- name='uniq_virtual_interfaces0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- volume_id_mappings = sa.Table('volume_id_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- volume_usage_cache = sa.Table('volume_usage_cache', meta,
- sa.Column('created_at', sa.DateTime(timezone=False)),
- sa.Column('updated_at', sa.DateTime(timezone=False)),
- sa.Column('deleted_at', sa.DateTime(timezone=False)),
- sa.Column('id', sa.Integer(), primary_key=True, nullable=False),
- sa.Column('volume_id', sa.String(36), nullable=False),
- sa.Column('tot_last_refreshed', sa.DateTime(timezone=False)),
- sa.Column('tot_reads', sa.BigInteger(), default=0),
- sa.Column('tot_read_bytes', sa.BigInteger(), default=0),
- sa.Column('tot_writes', sa.BigInteger(), default=0),
- sa.Column('tot_write_bytes', sa.BigInteger(), default=0),
- sa.Column('curr_last_refreshed', sa.DateTime(timezone=False)),
- sa.Column('curr_reads', sa.BigInteger(), default=0),
- sa.Column('curr_read_bytes', sa.BigInteger(), default=0),
- sa.Column('curr_writes', sa.BigInteger(), default=0),
- sa.Column('curr_write_bytes', sa.BigInteger(), default=0),
- sa.Column('deleted', sa.Integer),
- sa.Column('instance_uuid', sa.String(length=36)),
- sa.Column('project_id', sa.String(length=36)),
- sa.Column('user_id', sa.String(length=64)),
- sa.Column('availability_zone', sa.String(length=255)),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # create all tables
- tables = [instances, aggregates, console_auth_tokens,
- console_pools, instance_types,
- security_groups, snapshots,
- # those that are children and others later
- agent_builds, aggregate_hosts, aggregate_metadata,
- block_device_mapping, bw_usage_cache, cells,
- certificates, compute_nodes, consoles,
- dns_domains, fixed_ips, floating_ips,
- instance_faults, instance_id_mappings, instance_info_caches,
- instance_metadata, instance_system_metadata,
- instance_type_extra_specs, instance_type_projects,
- instance_actions, instance_actions_events, instance_extra,
- groups, group_policy, group_member,
- key_pairs, migrations, networks,
- pci_devices, provider_fw_rules, quota_classes, quota_usages,
- quotas, project_user_quotas,
- reservations, s3_images, security_group_instance_association,
- security_group_rules, security_group_default_rules,
- services, snapshot_id_mappings, tags, task_log,
- virtual_interfaces,
- volume_id_mappings,
- volume_usage_cache,
- resource_providers, inventories, allocations,
- resource_provider_aggregates]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table.')
- raise
-
- # MySQL specific indexes
- if migrate_engine.name == 'mysql':
- # NOTE(stephenfin): For some reason, we have to put this within the if
- # statement to avoid it being evaluated for the sqlite case. Even
- # though we don't call create except in the MySQL case... Failure to do
- # this will result in the following ugly error message:
- #
- # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such
- # index: instance_type_id
- #
- # Yeah, I don't get it either...
- mysql_specific_indexes = [
- sa.Index(
- 'instance_type_id',
- instance_type_projects.c.instance_type_id),
- sa.Index('usage_id', reservations.c.usage_id),
- sa.Index(
- 'security_group_id',
- security_group_instance_association.c.security_group_id),
- ]
-
- for index in mysql_specific_indexes:
- index.create(migrate_engine)
-
- if migrate_engine.name == 'mysql':
- # In Folsom we explicitly converted migrate_version to UTF8.
- with migrate_engine.connect() as conn:
- conn.exec_driver_sql(
- 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8'
- )
- # Set default DB charset to UTF8.
- conn.exec_driver_sql(
- 'ALTER DATABASE `%s` DEFAULT CHARACTER SET utf8' % (
- migrate_engine.url.database,
- )
- )
-
- # NOTE(cdent): The resource_providers table is defined as latin1 to
- # be more efficient. Now we need the name column to be UTF8. We
- # modify it here otherwise the declarative handling in sqlalchemy
- # gets confused.
- conn.exec_driver_sql(
- 'ALTER TABLE resource_providers MODIFY name '
- 'VARCHAR(200) CHARACTER SET utf8'
- )
-
- _create_shadow_tables(migrate_engine)
-
- # TODO(stephenfin): Fix these various bugs in a follow-up
-
- # 298_mysql_extra_specs_binary_collation; the shadow table should also be
- # updated
-
- if migrate_engine.name == 'mysql':
- with migrate_engine.connect() as conn:
- # Use binary collation for extra specs table
- conn.exec_driver_sql(
- 'ALTER TABLE instance_type_extra_specs '
- 'CONVERT TO CHARACTER SET utf8 '
- 'COLLATE utf8_bin'
- )
diff --git a/nova/db/main/legacy_migrations/versions/403_placeholder.py b/nova/db/main/legacy_migrations/versions/403_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/403_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/404_placeholder.py b/nova/db/main/legacy_migrations/versions/404_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/404_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/405_placeholder.py b/nova/db/main/legacy_migrations/versions/405_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/405_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/406_placeholder.py b/nova/db/main/legacy_migrations/versions/406_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/406_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/407_placeholder.py b/nova/db/main/legacy_migrations/versions/407_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/407_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/408_placeholder.py b/nova/db/main/legacy_migrations/versions/408_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/408_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/409_placeholder.py b/nova/db/main/legacy_migrations/versions/409_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/409_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/410_placeholder.py b/nova/db/main/legacy_migrations/versions/410_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/410_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/411_placeholder.py b/nova/db/main/legacy_migrations/versions/411_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/411_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/412_placeholder.py b/nova/db/main/legacy_migrations/versions/412_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/412_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/413_placeholder.py b/nova/db/main/legacy_migrations/versions/413_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/413_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/414_placeholder.py b/nova/db/main/legacy_migrations/versions/414_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/414_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/415_placeholder.py b/nova/db/main/legacy_migrations/versions/415_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/415_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/416_placeholder.py b/nova/db/main/legacy_migrations/versions/416_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/416_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/417_placeholder.py b/nova/db/main/legacy_migrations/versions/417_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/417_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/418_placeholder.py b/nova/db/main/legacy_migrations/versions/418_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/418_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/419_placeholder.py b/nova/db/main/legacy_migrations/versions/419_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/419_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/420_placeholder.py b/nova/db/main/legacy_migrations/versions/420_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/420_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/421_placeholder.py b/nova/db/main/legacy_migrations/versions/421_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/421_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/422_placeholder.py b/nova/db/main/legacy_migrations/versions/422_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/422_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/__init__.py b/nova/db/main/legacy_migrations/versions/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/db/main/legacy_migrations/versions/__init__.py
+++ /dev/null
diff --git a/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
new file mode 100644
index 0000000000..f4666a2b00
--- /dev/null
+++ b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""de-duplicate_indexes_in_instances__console_auth_tokens
+
+Revision ID: 960aac0e09ea
+Revises: ccb0fa1a2252
+Create Date: 2022-09-15 17:00:23.175991
+"""
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = '960aac0e09ea'
+down_revision = 'ccb0fa1a2252'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ with op.batch_alter_table('console_auth_tokens', schema=None) as batch_op:
+ batch_op.drop_index('console_auth_tokens_token_hash_idx')
+
+ with op.batch_alter_table('instances', schema=None) as batch_op:
+ batch_op.drop_index('uuid')
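The replacement Alembic revision above drops the two redundant indexes inside op.batch_alter_table() blocks; batch mode keeps the revision portable to SQLite, whose ALTER TABLE support is limited, as well as MySQL and PostgreSQL. A minimal sketch, not part of the diff, of applying it with the Alembic command API (the config path and database URL are placeholders; db_sync() in nova/db/migration.py does essentially this with the engine's real URL):

    from alembic import command as alembic_api
    from alembic import config as alembic_config

    config = alembic_config.Config('nova/db/main/alembic.ini')  # assumed path
    config.set_main_option('sqlalchemy.url', 'sqlite:///nova.db')  # placeholder
    alembic_api.upgrade(config, '960aac0e09ea')  # or 'head' for all revisions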
diff --git a/nova/db/main/models.py b/nova/db/main/models.py
index 7551584c1c..f8363a89c0 100644
--- a/nova/db/main/models.py
+++ b/nova/db/main/models.py
@@ -266,7 +266,6 @@ class Instance(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
- sa.Index('uuid', 'uuid', unique=True),
sa.Index('instances_project_id_idx', 'project_id'),
sa.Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
@@ -1046,7 +1045,6 @@ class ConsoleAuthToken(BASE, NovaBase):
__table_args__ = (
sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
- sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
sa.Index(
'console_auth_tokens_token_hash_instance_uuid_idx', 'token_hash',
'instance_uuid',
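Both model-level removals mirror the new migration and leave an equivalent access path in place: instances.uuid is still covered by the uniq_instances0uuid unique constraint (a unique constraint is backed by a unique index on MySQL/InnoDB), and lookups by token_hash alone can use the leading column of the composite console_auth_tokens_token_hash_instance_uuid_idx index, per the leftmost-prefix rule for B-tree indexes. A minimal sketch, not part of the diff, with an illustrative table name:

    import sqlalchemy as sa

    meta = sa.MetaData()
    demo = sa.Table(
        'demo', meta,
        sa.Column('uuid', sa.String(36), nullable=False),
        sa.Column('token_hash', sa.String(255), nullable=False),
        sa.Column('instance_uuid', sa.String(36)),
        # backed by a unique index; a separate Index('uuid', ..., unique=True)
        # would only duplicate it
        sa.UniqueConstraint('uuid', name='uniq_demo0uuid'),
        # also serves WHERE token_hash = ? via its leftmost column
        sa.Index('demo_token_hash_instance_uuid_idx',
                 'token_hash', 'instance_uuid'),
    )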
diff --git a/nova/db/migration.py b/nova/db/migration.py
index 80410c3192..2b185af1a6 100644
--- a/nova/db/migration.py
+++ b/nova/db/migration.py
@@ -19,24 +19,12 @@ import os
from alembic import command as alembic_api
from alembic import config as alembic_config
from alembic.runtime import migration as alembic_migration
-from migrate import exceptions as migrate_exceptions
-from migrate.versioning import api as migrate_api
-from migrate.versioning import repository as migrate_repository
from oslo_log import log as logging
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
from nova import exception
-MIGRATE_INIT_VERSION = {
- 'main': 401,
- 'api': 66,
-}
-ALEMBIC_INIT_VERSION = {
- 'main': '8f2f1571d55b',
- 'api': 'd67eeaabee36',
-}
-
LOG = logging.getLogger(__name__)
@@ -48,16 +36,6 @@ def _get_engine(database='main', context=None):
return api_db_api.get_engine()
-def _find_migrate_repo(database='main'):
- """Get the path for the migrate repository."""
-
- path = os.path.join(
- os.path.abspath(os.path.dirname(__file__)),
- database, 'legacy_migrations')
-
- return migrate_repository.Repository(path)
-
-
def _find_alembic_conf(database='main'):
"""Get the path for the alembic repository."""
@@ -73,35 +51,6 @@ def _find_alembic_conf(database='main'):
return config
-def _is_database_under_migrate_control(engine, repository):
- try:
- migrate_api.db_version(engine, repository)
- return True
- except migrate_exceptions.DatabaseNotControlledError:
- return False
-
-
-def _is_database_under_alembic_control(engine):
- with engine.connect() as conn:
- context = alembic_migration.MigrationContext.configure(conn)
- return bool(context.get_current_revision())
-
-
-def _init_alembic_on_legacy_database(engine, database, repository, config):
- """Init alembic in an existing environment with sqlalchemy-migrate."""
- LOG.info(
- 'The database is still under sqlalchemy-migrate control; '
- 'applying any remaining sqlalchemy-migrate-based migrations '
- 'and fake applying the initial alembic migration'
- )
- migrate_api.upgrade(engine, repository)
-
- # re-use the connection rather than creating a new one
- with engine.begin() as connection:
- config.attributes['connection'] = connection
- alembic_api.stamp(config, ALEMBIC_INIT_VERSION[database])
-
-
def _upgrade_alembic(engine, config, version):
# re-use the connection rather than creating a new one
with engine.begin() as connection:
@@ -126,7 +75,6 @@ def db_sync(version=None, database='main', context=None):
engine = _get_engine(database, context=context)
- repository = _find_migrate_repo(database)
config = _find_alembic_conf(database)
# discard the URL stored in alembic.ini in favour of the URL configured
# for the engine, casting from 'sqlalchemy.engine.url.URL' to str in the
@@ -138,16 +86,6 @@ def db_sync(version=None, database='main', context=None):
url = str(engine.url).replace('%', '%%')
config.set_main_option('sqlalchemy.url', url)
- # if we're in a deployment where sqlalchemy-migrate is already present,
- # then apply all the updates for that and fake apply the initial alembic
- # migration; if we're not then 'upgrade' will take care of everything
- # this should be a one-time operation
- if (
- _is_database_under_migrate_control(engine, repository) and
- not _is_database_under_alembic_control(engine)
- ):
- _init_alembic_on_legacy_database(engine, database, repository, config)
-
# apply anything later
LOG.info('Applying migration(s)')
@@ -161,17 +99,10 @@ def db_version(database='main', context=None):
if database not in ('main', 'api'):
raise exception.Invalid('%s is not a valid database' % database)
- repository = _find_migrate_repo(database)
engine = _get_engine(database, context=context)
- migrate_version = None
- if _is_database_under_migrate_control(engine, repository):
- migrate_version = migrate_api.db_version(engine, repository)
-
- alembic_version = None
- if _is_database_under_alembic_control(engine):
- with engine.connect() as conn:
- m_context = alembic_migration.MigrationContext.configure(conn)
- alembic_version = m_context.get_current_revision()
+ with engine.connect() as conn:
+ m_context = alembic_migration.MigrationContext.configure(conn)
+ version = m_context.get_current_revision()
- return alembic_version or migrate_version
+ return version
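With the sqlalchemy-migrate fallback gone, db_version() above reduces to asking alembic for the revision stamped on the database. A minimal standalone sketch of the same call pattern, assuming a reachable SQLAlchemy URL (the sqlite path here is purely illustrative):

    import sqlalchemy as sa
    from alembic.runtime import migration as alembic_migration

    def current_revision(url='sqlite:///nova.db'):
        # Ask alembic for the revision currently stamped on the database;
        # returns None if the database has never been stamped.
        engine = sa.create_engine(url)
        with engine.connect() as conn:
            ctx = alembic_migration.MigrationContext.configure(conn)
            return ctx.get_current_revision()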
diff --git a/nova/exception.py b/nova/exception.py
index cebaea4bee..0c0ffa85a1 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1451,6 +1451,11 @@ class InstanceEvacuateNotSupported(Invalid):
msg_fmt = _('Instance evacuate is not supported.')
+class InstanceEvacuateNotSupportedTargetState(Invalid):
+ msg_fmt = _("Target state '%(target_state)s' for instance evacuate "
+ "is not supported.")
+
+
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
@@ -1479,6 +1484,11 @@ class UnsupportedRescueImage(Invalid):
msg_fmt = _("Requested rescue image '%(image)s' is not supported")
+class UnsupportedRPCVersion(Invalid):
+ msg_fmt = _("Unsupported RPC version for %(api)s. "
+ "Required >= %(required)s")
+
+
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
@@ -2496,3 +2506,18 @@ class PlacementPciMixedTraitsException(PlacementPciException):
class ReimageException(NovaException):
msg_fmt = _("Reimaging volume failed.")
+
+
+class InvalidNodeConfiguration(NovaException):
+ msg_fmt = _('Invalid node identity configuration: %(reason)s')
+
+
+class DuplicateRecord(NovaException):
+ msg_fmt = _('Unable to create duplicate record for %(target)s')
+
+
+class NotSupportedComputeForEvacuateV295(NotSupported):
+ msg_fmt = _("Starting with microversion 2.95, evacuate API will stop "
+ "instance on destination. To evacuate before upgrades are "
+ "complete please use an older microversion. Required version "
+                "for compute %(expected)s, current version %(currently)s")
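As a quick illustration of the new evacuate exception (and why the %(expected)s format specifier above needs its trailing "s"), a hedged sketch with made-up version numbers; NovaException interpolates msg_fmt with the keyword arguments:

    from nova import exception

    exc = exception.NotSupportedComputeForEvacuateV295(
        expected='6.2', currently='6.1')
    # format_message() returns the interpolated msg_fmt string.
    print(exc.format_message())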
diff --git a/nova/filesystem.py b/nova/filesystem.py
new file mode 100644
index 0000000000..5394d2d835
--- /dev/null
+++ b/nova/filesystem.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to address filesystem calls, particularly sysfs."""
+
+import os
+
+from oslo_log import log as logging
+
+from nova import exception
+
+LOG = logging.getLogger(__name__)
+
+
+SYS = '/sys'
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+def read_sys(path: str) -> str:
+ """Reads the content of a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :returns: contents of that file.
+ :raises: nova.exception.FileNotFound if we can't read that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='r') as data:
+ return data.read()
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+# In order to correctly use it, you need to decorate the caller with a specific
+# privsep entrypoint.
+def write_sys(path: str, data: str) -> None:
+ """Writes the content of a file in the sys filesystem with data.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :param data: the data to write.
+    :returns: None.
+ :raises: nova.exception.FileNotFound if we can't write that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='w') as fd:
+ fd.write(data)
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
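A short usage sketch for the two helpers above. The sysfs attribute is only an example, and a real caller of write_sys() would sit behind its own privsep entrypoint as the notes say:

    from nova import filesystem

    # Relative paths are joined onto /sys by the helpers.
    online = filesystem.read_sys('devices/system/cpu/cpu1/online').strip()
    if online == '0':
        # Needs elevated privileges; shown inline here only for brevity.
        filesystem.write_sys('devices/system/cpu/cpu1/online', '1')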
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index cd393e7b33..704538250f 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -141,6 +141,8 @@ rwlock_re = re.compile(
r"(?P<module_part>(oslo_concurrency\.)?(lockutils|fasteners))"
r"\.ReaderWriterLock\(.*\)")
six_re = re.compile(r"^(import six(\..*)?|from six(\..*)? import .*)$")
+# Regex for catching the setDaemon method
+set_daemon_re = re.compile(r"\.setDaemon\(")
class BaseASTChecker(ast.NodeVisitor):
@@ -1078,3 +1080,22 @@ def import_stock_mock(logical_line):
"N371: You must explicitly import python's mock: "
"``from unittest import mock``"
)
+
+
+@core.flake8ext
+def check_set_daemon(logical_line):
+ """Check for use of the setDaemon method of the threading.Thread class
+
+ The setDaemon method of the threading.Thread class has been deprecated
+ since Python 3.10. Use the daemon attribute instead.
+
+ See
+ https://docs.python.org/3.10/library/threading.html#threading.Thread.setDaemon
+ for details.
+
+ N372
+ """
+ res = set_daemon_re.search(logical_line)
+ if res:
+ yield (0, "N372: Don't use the setDaemon method. "
+ "Use the daemon attribute instead.")
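For reference, a minimal sketch of the pattern the N372 check flags and the replacement it steers authors towards:

    import threading

    t = threading.Thread(target=lambda: None)
    # t.setDaemon(True)   # would be flagged by N372; deprecated since 3.10
    t.daemon = True       # preferred spelling
    t.start()
    t.join()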
diff --git a/nova/manager.py b/nova/manager.py
index 9c00401b96..df03305367 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -103,12 +103,15 @@ class Manager(PeriodicTasks, metaclass=ManagerMeta):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
- def init_host(self):
+ def init_host(self, service_ref):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
- is created.
+ is created, but if one already exists for this service, it is
+ provided.
Child classes should override this method.
+
+ :param service_ref: An objects.Service if one exists, else None.
"""
pass
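A hypothetical subclass illustrating the new init_host() contract; the attribute name below is made up, the point is only that service_ref is an objects.Service when a record already exists and None on the very first start:

    from nova import manager

    class ExampleManager(manager.Manager):
        def init_host(self, service_ref):
            # service_ref is None on first start, before any service
            # record has been created for this host/binary.
            if service_ref is not None:
                self.existing_service_uuid = service_ref.uuid
            else:
                self.existing_service_uuid = None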
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 27e7d06455..0ae0f4099f 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -612,10 +612,22 @@ class API:
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
+ def unbind_ports(self, context, ports, detach=True):
+ """Unbind and detach the given ports by clearing their
+ device_owner and dns_name.
+        The device_id will also be cleared if detach=True.
+
+        :param context: The request context.
+        :param ports: list of port IDs.
+        :param detach: If True, also clear the device_id of the ports.
+        """
+ neutron = get_client(context)
+ self._unbind_ports(context, ports, neutron, detach=detach)
+
def _unbind_ports(self, context, ports,
- neutron, port_client=None):
- """Unbind the given ports by clearing their device_id,
+ neutron, port_client=None, detach=True):
+ """Unbind and detach the given ports by clearing their
device_owner and dns_name.
+        The device_id will also be cleared if detach=True.
:param context: The request context.
:param ports: list of port IDs.
@@ -638,11 +650,12 @@ class API:
port_req_body: ty.Dict[str, ty.Any] = {
'port': {
- 'device_id': '',
- 'device_owner': '',
constants.BINDING_HOST_ID: None,
}
}
+ if detach:
+ port_req_body['port']['device_id'] = ''
+ port_req_body['port']['device_owner'] = ''
try:
port = self._show_port(
context, port_id, neutron_client=neutron,
@@ -3894,7 +3907,7 @@ class API:
'Failed to get segment IDs for network %s' % network_id) from e
# The segment field of an unconfigured subnet could be None
return [subnet['segment_id'] for subnet in subnets
- if subnet['segment_id'] is not None]
+ if subnet.get('segment_id') is not None]
def get_segment_id_for_subnet(
self,
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 60c2be71cd..dfc1b2ae28 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -339,7 +340,12 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
- db_compute = db.compute_node_create(self._context, updates)
+ try:
+ db_compute = db.compute_node_create(self._context, updates)
+ except db_exc.DBDuplicateEntry:
+ target = 'compute node %s:%s' % (updates['hypervisor_hostname'],
+ updates['uuid'])
+ raise exception.DuplicateRecord(target=target)
self._from_db_object(self._context, self, db_compute)
@base.remotable
@@ -388,8 +394,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# The uuid field is read-only so it should only be set when
# creating the compute node record for the first time. Ignore
# it otherwise.
- if key == 'uuid' and 'uuid' in self:
- continue
+ if (key == 'uuid' and 'uuid' in self and
+ resources[key] != self.uuid):
+ raise exception.InvalidNodeConfiguration(
+ reason='Attempt to overwrite node %s with %s!' % (
+ self.uuid, resources[key]))
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index c17c963e77..a4ca77edf6 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -14,12 +14,15 @@
import copy
import itertools
+import typing as ty
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
+from nova.compute import pci_placement_translator
+import nova.conf
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova import exception
@@ -28,6 +31,7 @@ from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
@@ -473,6 +477,113 @@ class RequestSpec(base.NovaObject):
filt_props['requested_destination'] = self.requested_destination
return filt_props
+ @staticmethod
+ def _rc_from_request(spec: ty.Dict[str, ty.Any]) -> str:
+ return pci_placement_translator.get_resource_class(
+ spec.get("resource_class"),
+ spec.get("vendor_id"),
+ spec.get("product_id"),
+ )
+
+ @staticmethod
+ def _traits_from_request(spec: ty.Dict[str, ty.Any]) -> ty.Set[str]:
+ return pci_placement_translator.get_traits(spec.get("traits", ""))
+
+ def generate_request_groups_from_pci_requests(self):
+ if not CONF.filter_scheduler.pci_in_placement:
+ return False
+
+ for pci_request in self.pci_requests.requests:
+ if pci_request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): Handle neutron based PCI requests here in a later
+ # cycle.
+ continue
+
+ if len(pci_request.spec) != 1:
+ # We are instantiating InstancePCIRequest objects with spec in
+ # two cases:
+ # 1) when a neutron port is translated to InstancePCIRequest
+ # object in
+ # nova.network.neutron.API.create_resource_requests
+ # 2) when the pci_passthrough:alias flavor extra_spec is
+ # translated to InstancePCIRequest objects in
+ # nova.pci.request._get_alias_from_config which enforces the
+ # json schema defined in nova.pci.request.
+ #
+ # In both cases only a single dict is added to the spec list.
+ # If we ever want to add support for multiple specs per request
+ # then we have to solve the issue that each spec can request a
+ # different resource class from placement. The only place in
+ # nova that currently handles multiple specs per request is
+ # nova.pci.utils.pci_device_prop_match() and it considers them
+ # as alternatives. So specs with different resource classes
+ # would mean alternative resource_class requests. This cannot
+ # be expressed today in the allocation_candidate query towards
+ # placement.
+ raise ValueError(
+ "PCI tracking in placement does not support multiple "
+ "specs per PCI request"
+ )
+
+ spec = pci_request.spec[0]
+
+ # The goal is to translate InstancePCIRequest to RequestGroup. Each
+ # InstancePCIRequest can be fulfilled from the whole RP tree. And
+ # a flavor based InstancePCIRequest might request more than one
+ # device (if count > 1) and those devices still need to be placed
+ # independently to RPs. So we could have two options to translate
+ # an InstancePCIRequest object to RequestGroup objects:
+            # 1) put all the requested resources from every
+ # InstancePCIRequest to the unsuffixed RequestGroup.
+ # 2) generate a separate RequestGroup for each individual device
+ # request
+ #
+ # While #1) feels simpler it has a big downside. The unsuffixed
+ # group will have a bulk request group resource provider mapping
+ # returned from placement. So there would be no easy way to later
+ # untangle which InstancePCIRequest is fulfilled by which RP, and
+ # therefore which PCI device should be used to allocate a specific
+ # device on the hypervisor during the PCI claim. Note that there
+ # could be multiple PF RPs providing the same type of resources but
+ # still we need to make sure that if a resource is allocated in
+ # placement from a specific RP (representing a physical device)
+ # then the PCI claim should consume resources from the same
+ # physical device.
+ #
+ # So we need at least a separate RequestGroup per
+            # InstancePCIRequest. However, for an InstancePCIRequest(count=2)
+            # that would mean a RequestGroup(RC:2) which would mean both
+            # resources would have to come from the same RP in placement. This
+            # is impossible for PF or PCI type requests and overly restrictive
+            # for
+ # VF type requests. Therefore we need to generate one RequestGroup
+ # per requested device. So for InstancePCIRequest(count=2) we need
+ # to generate two separate RequestGroup(RC:1) objects.
+
+ # NOTE(gibi): If we have count=2 requests then the multiple
+ # RequestGroup split below only works if group_policy is set to
+ # none as group_policy=isolate would prevent allocating two VFs
+ # from the same PF. Fortunately
+ # nova.scheduler.utils.resources_from_request_spec() already
+ # defaults group_policy to none if it is not specified in the
+ # flavor and there are multiple RequestGroups in the RequestSpec.
+
+ for i in range(pci_request.count):
+ rg = objects.RequestGroup(
+ use_same_provider=True,
+ # we need to generate a unique ID for each group, so we use
+ # a counter
+ requester_id=f"{pci_request.request_id}-{i}",
+ # as we split count >= 2 requests to independent groups
+ # each group will have a resource request of one
+ resources={
+ self._rc_from_request(spec): 1
+ },
+ required_traits=self._traits_from_request(spec),
+ # TODO(gibi): later we can add support for complex trait
+ # queries here including forbidden_traits.
+ )
+ self.requested_resources.append(rg)
+
@classmethod
def from_components(
cls, context, instance_uuid, image, flavor,
@@ -539,6 +650,8 @@ class RequestSpec(base.NovaObject):
if port_resource_requests:
spec_obj.requested_resources.extend(port_resource_requests)
+ spec_obj.generate_request_groups_from_pci_requests()
+
# NOTE(gibi): later the scheduler adds more request level params but
# never overrides existing ones so we can initialize them here.
if request_level_params is None:
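To make the splitting rule above concrete, a rough illustration (plain dicts rather than nova objects) of what a flavor based PCI request with count=2 is turned into; the request id, resource class and trait are made up:

    request_id = 'b8e5da43-589f-4ff4-b4e9-066b0b0a2b3b'
    count = 2
    groups = [
        {
            'requester_id': f'{request_id}-{i}',
            'use_same_provider': True,
            # each suffixed group asks for exactly one device
            'resources': {'CUSTOM_EXAMPLE_PCI_RC': 1},
            'required_traits': {'CUSTOM_EXAMPLE_TRAIT'},
        }
        for i in range(count)
    ]
    # Both groups may still land on the same PF because group_policy
    # defaults to 'none' when multiple groups are present.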
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 71361e0168..1a4629cc84 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 64
+SERVICE_VERSION = 66
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -225,17 +225,42 @@ SERVICE_VERSION_HISTORY = (
# Version 64: Compute RPC v6.1:
# Add reimage_boot_volume parameter to rebuild_instance()
{'compute_rpc': '6.1'},
+ # Version 65: Compute RPC v6.1:
+ # Added stable local node identity
+ {'compute_rpc': '6.1'},
+ # Version 66: Compute RPC v6.2:
+ # Add target_state parameter to rebuild_instance()
+ {'compute_rpc': '6.2'},
)
-# This is used to raise an error at service startup if older than N-1 computes
-# are detected. Update this at the beginning of every release cycle to point to
-# the smallest service version that was added in N-1.
-OLDEST_SUPPORTED_SERVICE_VERSION = 'Yoga'
+# This is the version after which we can rely on having a persistent
+# local node identity for single-node systems.
+NODE_IDENTITY_VERSION = 65
+
+# This is used to raise an error at service startup if older than supported
+# computes are detected.
+# NOTE(sbauza): Please modify it this way:
+# * At the beginning of a non-SLURP release (e.g. 2023.2 Bobcat) (or just
+#   after the previous SLURP release RC1, like 2023.1 Antelope), please bump
+#   OLDEST_SUPPORTED_SERVICE_VERSION to the previous SLURP release (in that
+#   example, Antelope)
+# * At the beginning of a SLURP release (e.g. 2024.1 C) (or just after the
+#   previous non-SLURP release RC1, like 2023.2 Bobcat), please keep the
+#   OLDEST_SUPPORTED_SERVICE_VERSION value using the previous SLURP release
+#   (in that example, Antelope)
+# * At the end of any release (SLURP or non-SLURP), please modify
+#   SERVICE_VERSION_ALIASES to add a key/value with the key being the release
+#   name and the value being the latest service version that the release
+#   supports (for example, before Bobcat RC1, please add 'Bobcat': XX where
+#   XX is the latest service version that was added)
+OLDEST_SUPPORTED_SERVICE_VERSION = 'Antelope'
SERVICE_VERSION_ALIASES = {
'Victoria': 52,
'Wallaby': 54,
'Xena': 57,
'Yoga': 61,
+ 'Zed': 64,
+ 'Antelope': 66,
}
diff --git a/nova/pci/request.py b/nova/pci/request.py
index 38056d79b3..8ae2385549 100644
--- a/nova/pci/request.py
+++ b/nova/pci/request.py
@@ -106,6 +106,12 @@ _ALIAS_SCHEMA = {
"type": "string",
"enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
},
+ "resource_class": {
+ "type": "string",
+ },
+ "traits": {
+ "type": "string",
+ },
},
"required": ["name"],
}
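With the two new optional properties, a PCI alias in nova.conf could look roughly like the following; the IDs, resource class and traits are purely illustrative:

    [pci]
    alias = {"name": "example-dev",
             "product_id": "1eb8",
             "vendor_id": "10de",
             "device_type": "type-PF",
             "resource_class": "CUSTOM_EXAMPLE_RC",
             "traits": "CUSTOM_TRAIT_A,CUSTOM_TRAIT_B"}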
@@ -114,7 +120,7 @@ _ALIAS_SCHEMA = {
def _get_alias_from_config() -> Alias:
"""Parse and validate PCI aliases from the nova config.
- :returns: A dictionary where the keys are device names and the values are
+ :returns: A dictionary where the keys are alias names and the values are
tuples of form ``(numa_policy, specs)``. ``numa_policy`` describes the
required NUMA affinity of the device(s), while ``specs`` is a list of
PCI device specs.
@@ -162,7 +168,7 @@ def _get_alias_from_config() -> Alias:
def _translate_alias_to_requests(
- alias_spec: str, affinity_policy: str = None,
+ alias_spec: str, affinity_policy: ty.Optional[str] = None,
) -> ty.List['objects.InstancePCIRequest']:
"""Generate complete pci requests from pci aliases in extra_spec."""
pci_aliases = _get_alias_from_config()
@@ -249,7 +255,7 @@ def get_instance_pci_request_from_vif(
def get_pci_requests_from_flavor(
- flavor: 'objects.Flavor', affinity_policy: str = None,
+ flavor: 'objects.Flavor', affinity_policy: ty.Optional[str] = None,
) -> 'objects.InstancePCIRequests':
"""Validate and return PCI requests.
diff --git a/nova/pci/stats.py b/nova/pci/stats.py
index 3518b95289..c6e4844b34 100644
--- a/nova/pci/stats.py
+++ b/nova/pci/stats.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
import copy
import typing as ty
@@ -64,12 +64,25 @@ class PciDeviceStats(object):
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
+ # these can be specified in the [pci]device_spec and can be requested via
+ # the PCI alias, but they are matched by the placement
+ # allocation_candidates query, so we can ignore them during pool creation
+ # and during filtering here
+ ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits']
+ # this is a metadata key in the spec that is matched
+ # specially in _filter_pools_based_on_placement_allocation. So we can
+    # ignore it in the general matching logic.
+ ignored_spec_tags += ['rp_uuids']
+ # this is a metadata key in the pool that is matched
+ # specially in _filter_pools_based_on_placement_allocation. So we can
+    # ignore it in the general matching logic.
+ ignored_pool_tags += ['rp_uuid']
def __init__(
self,
numa_topology: 'objects.NUMATopology',
stats: 'objects.PCIDevicePoolList' = None,
- dev_filter: whitelist.Whitelist = None,
+ dev_filter: ty.Optional[whitelist.Whitelist] = None,
) -> None:
self.numa_topology = numa_topology
self.pools = (
@@ -134,8 +147,22 @@ class PciDeviceStats(object):
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
+
if tags:
- pool.update(tags)
+ pool.update(
+ {
+ k: v
+ for k, v in tags.items()
+ if k not in self.ignored_pool_tags
+ }
+ )
+ # NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a
+ # single RP and the scheduler allocates from a specific RP we need
+ # to split the pools by PCI or PF address. We can still keep
+ # the VFs from the same parent PF in a single pool though as they
+ # are equivalent from placement perspective.
+ pool['address'] = dev.parent_addr or dev.address
+
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
# by the virt driver.
@@ -224,6 +251,17 @@ class PciDeviceStats(object):
free_devs.extend(pool['devices'])
return free_devs
+ def _allocate_devs(
+ self, pool: Pool, num: int, request_id: str
+ ) -> ty.List["objects.PciDevice"]:
+ alloc_devices = []
+ for _ in range(num):
+ pci_dev = pool['devices'].pop()
+ self._handle_device_dependents(pci_dev)
+ pci_dev.request_id = request_id
+ alloc_devices.append(pci_dev)
+ return alloc_devices
+
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
@@ -235,7 +273,10 @@ class PciDeviceStats(object):
for request in pci_requests:
count = request.count
- pools = self._filter_pools(self.pools, request, numa_cells)
+ rp_uuids = self._get_rp_uuids_for_request(
+ request=request, provider_mapping=None)
+ pools = self._filter_pools(
+ self.pools, request, numa_cells, rp_uuids=rp_uuids)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
@@ -251,20 +292,29 @@ class PciDeviceStats(object):
self.add_device(alloc_devices.pop())
raise exception.PciDeviceRequestFailed(requests=pci_requests)
- for pool in pools:
- if pool['count'] >= count:
- num_alloc = count
- else:
- num_alloc = pool['count']
- count -= num_alloc
- pool['count'] -= num_alloc
- for d in range(num_alloc):
- pci_dev = pool['devices'].pop()
- self._handle_device_dependents(pci_dev)
- pci_dev.request_id = request.request_id
- alloc_devices.append(pci_dev)
- if count == 0:
- break
+ if not rp_uuids:
+ # if there is no placement allocation then we are free to
+ # consume from the pools in any order:
+ for pool in pools:
+ if pool['count'] >= count:
+ num_alloc = count
+ else:
+ num_alloc = pool['count']
+ count -= num_alloc
+ pool['count'] -= num_alloc
+ alloc_devices += self._allocate_devs(
+ pool, num_alloc, request.request_id)
+ if count == 0:
+ break
+ else:
+ # but if there is placement allocation then we have to follow
+ # it
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ alloc_devices += self._allocate_devs(
+ pool, count, request.request_id)
return alloc_devices
@@ -313,7 +363,15 @@ class PciDeviceStats(object):
:returns: A list of pools that can be used to support the request if
this is possible.
"""
- request_specs = request.spec
+
+ def ignore_keys(spec):
+ return {
+ k: v
+ for k, v in spec.items()
+ if k not in self.ignored_spec_tags
+ }
+
+ request_specs = [ignore_keys(spec) for spec in request.spec]
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
@@ -510,11 +568,52 @@ class PciDeviceStats(object):
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
+ def _filter_pools_based_on_placement_allocation(
+ self,
+ pools: ty.List[Pool],
+ request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
+ ) -> ty.List[Pool]:
+ if not rp_uuids:
+ # If there is no placement allocation then we don't need to filter
+ # by it. This could happen if the instance only has neutron port
+            # based InstancePCIRequests, as those currently do not have a
+            # placement allocation (except for QoS ports, which are handled in
+            # a separate codepath), or if the [filter_scheduler]pci_in_placement
+ # configuration option is not enabled in the scheduler.
+ return pools
+
+ requested_dev_count_per_rp = collections.Counter(rp_uuids)
+ matching_pools = []
+ for pool in pools:
+ rp_uuid = pool.get('rp_uuid')
+ if rp_uuid is None:
+ # NOTE(gibi): As rp_uuids is not empty the scheduler allocated
+ # PCI resources on this host, so we know that
+ # [pci]report_in_placement is enabled on this host. But this
+ # pool has no RP mapping which can only happen if the pool
+ # contains PCI devices with physical_network tag, as those
+                # devices are not yet reported in placement. But if they are
+                # not
+ # reported then we can ignore them here too.
+ continue
+
+ if (
+ # the placement allocation contains this pool
+ rp_uuid in requested_dev_count_per_rp and
+ # the amount of dev allocated in placement can be consumed
+ # from the pool
+ pool["count"] >= requested_dev_count_per_rp[rp_uuid]
+ ):
+ matching_pools.append(pool)
+
+ return matching_pools
+
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
+ rp_uuids: ty.List[str],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
@@ -529,6 +628,9 @@ class PciDeviceStats(object):
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement. So here we have to consider only the pools matching
+            these RP uuids.
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
@@ -613,6 +715,19 @@ class PciDeviceStats(object):
before_count - after_count
)
+ # if there is placement allocation for the request then we have to
+ # remove the pools that are not in the placement allocation
+ before_count = after_count
+ pools = self._filter_pools_based_on_placement_allocation(
+ pools, request, rp_uuids)
+ after_count = sum([pool['count'] for pool in pools])
+ if after_count < before_count:
+ LOG.debug(
+ 'Dropped %d device(s) that are not part of the placement '
+ 'allocation',
+ before_count - after_count
+ )
+
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
@@ -622,6 +737,7 @@ class PciDeviceStats(object):
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
@@ -635,6 +751,12 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+        :param provider_mapping: A dict keyed by RequestGroup requester_id,
+            mapping to a list of resource provider UUIDs which provide
+            resources for that RequestGroup. If it is None then it signals
+            that the InstancePCIRequest objects already store a mapping per
+            request. I.e.: we are called _after_ the scheduler made
+            allocations for this request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
@@ -650,7 +772,7 @@ class PciDeviceStats(object):
# objects.
stats = copy.deepcopy(self)
try:
- stats.apply_requests(requests, numa_cells)
+ stats.apply_requests(requests, provider_mapping, numa_cells)
except exception.PciDeviceRequestFailed:
return False
@@ -660,6 +782,7 @@ class PciDeviceStats(object):
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
@@ -673,6 +796,8 @@ class PciDeviceStats(object):
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
@@ -682,22 +807,77 @@ class PciDeviceStats(object):
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
- filtered_pools = self._filter_pools(pools, request, numa_cells)
+ filtered_pools = self._filter_pools(
+ pools, request, numa_cells, rp_uuids)
if not filtered_pools:
return False
- count = request.count
- for pool in filtered_pools:
- count = self._decrease_pool_count(pools, pool, count)
- if not count:
- break
+ if not rp_uuids:
+ # If there is no placement allocation for this request then we are
+ # free to consume from the filtered pools in any order
+ count = request.count
+ for pool in filtered_pools:
+ count = self._decrease_pool_count(pools, pool, count)
+ if not count:
+ break
+ else:
+ # but if there is placement allocation then we have to follow that
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in filtered_pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ if pool['count'] == 0:
+ pools.remove(pool)
return True
+ def _get_rp_uuids_for_request(
+ self,
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
+ request: 'objects.InstancePCIRequest'
+ ) -> ty.List[str]:
+ """Return the list of RP uuids that are fulfilling the request.
+
+        An RP will be in the list as many times as the number of devices that
+        need to be allocated from that RP.
+ """
+
+ if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): support neutron based requests in a later cycle
+ # an empty list will signal that any PCI pool can be used for this
+ # request
+ return []
+
+ if not provider_mapping:
+ # NOTE(gibi): AFAIK specs is always a list of a single dict
+ # but the object is hard to change retroactively
+ rp_uuids = request.spec[0].get('rp_uuids')
+ if not rp_uuids:
+ # This can happen if [filter_scheduler]pci_in_placement is not
+ # enabled yet
+ # An empty list will signal that any PCI pool can be used for
+ # this request
+ return []
+
+ # TODO(gibi): this is baaad but spec is a dict of string so
+ # the list is serialized
+ return rp_uuids.split(',')
+
+ # NOTE(gibi): the PCI prefilter generates RequestGroup suffixes from
+ # InstancePCIRequests in the form of {request_id}-{count_index}
+        # NOTE(gibi): a suffixed request group is always fulfilled from a
+        # single RP
+ return [
+ rp_uuids[0]
+ for group_id, rp_uuids in provider_mapping.items()
+ if group_id.startswith(request.request_id)
+ ]
+
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
@@ -711,15 +891,23 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+        :param provider_mapping: A dict keyed by RequestGroup requester_id,
+            mapping to a list of resource provider UUIDs which provide
+            resources for that RequestGroup. If it is None then it signals
+            that the InstancePCIRequest objects already store a mapping per
+            request. I.e.: we are called _after_ the scheduler made
+            allocations for this request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
- if not all(
- self._apply_request(self.pools, r, numa_cells) for r in requests
- ):
- raise exception.PciDeviceRequestFailed(requests=requests)
+
+ for r in requests:
+ rp_uuids = self._get_rp_uuids_for_request(provider_mapping, r)
+
+ if not self._apply_request(self.pools, r, rp_uuids, numa_cells):
+ raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
@@ -757,3 +945,40 @@ class PciDeviceStats(object):
)
pools = self._filter_pools_for_spec(self.pools, dummy_req)
return bool(pools)
+
+ def populate_pools_metadata_from_assigned_devices(self):
+ """Populate the rp_uuid of each pool based on the rp_uuid of the
+ devices assigned to the pool. This can only be called from the compute
+ where devices are assigned to each pool. This should not be called from
+        the scheduler as the device - pool assignment is not known there.
+ """
+ # PciDevices are tracked in placement and flavor based PCI requests
+ # are scheduled and allocated in placement. To be able to correlate
+ # what is allocated in placement and what is consumed in nova we
+ # need to map device pools to RPs. We can do that as the PciDevice
+ # contains the RP UUID that represents it in placement.
+ # NOTE(gibi): We cannot do this when the device is originally added to
+ # the pool as the device -> placement translation, that creates the
+        # RPs, runs after all the devices are created and assigned to pools.
+ for pool in self.pools:
+ pool_rps = {
+ dev.extra_info.get("rp_uuid")
+ for dev in pool["devices"]
+ if "rp_uuid" in dev.extra_info
+ }
+ if len(pool_rps) >= 2:
+ # FIXME(gibi): Do we have a 1:1 pool - RP mapping even
+                # if two PFs provide very similar VFs?
+ raise ValueError(
+ "We have a pool %s connected to more than one RPs %s in "
+ "placement via devs %s" % (pool, pool_rps, pool["devices"])
+ )
+
+ if not pool_rps:
+ # this can happen if the nova-compute is upgraded to have the
+ # PCI in placement inventory handling code but
+ # [pci]report_in_placement is not turned on yet.
+ continue
+
+ if pool_rps: # now we know that it is a single RP
+ pool['rp_uuid'] = next(iter(pool_rps))
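The per-RP bookkeeping used in consume_requests() and _apply_request() is plain collections.Counter arithmetic over the repeated RP uuids; a standalone sketch with made-up values:

    import collections

    rp_uuids = ['rp-a', 'rp-a', 'rp-b']   # one entry per requested device
    pools = [{'rp_uuid': 'rp-a', 'count': 4},
             {'rp_uuid': 'rp-b', 'count': 1}]

    requested_devs_per_pool_rp = collections.Counter(rp_uuids)
    for pool in pools:
        pool['count'] -= requested_devs_per_pool_rp[pool['rp_uuid']]

    print(pools)  # rp-a drops to 2, rp-b drops to 0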
diff --git a/nova/pci/whitelist.py b/nova/pci/whitelist.py
index 8862a0ef4f..152cc29ca6 100644
--- a/nova/pci/whitelist.py
+++ b/nova/pci/whitelist.py
@@ -33,7 +33,7 @@ class Whitelist(object):
assignable.
"""
- def __init__(self, whitelist_spec: str = None) -> None:
+ def __init__(self, whitelist_spec: ty.Optional[str] = None) -> None:
"""White list constructor
For example, the following json string specifies that devices whose
diff --git a/nova/policies/tenant_networks.py b/nova/policies/tenant_networks.py
index ee5bd66cdf..79f8d21eaa 100644
--- a/nova/policies/tenant_networks.py
+++ b/nova/policies/tenant_networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
tenant_networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List project networks.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -52,7 +52,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show project network details.
This API is proxy calls to the Network service. This is deprecated.""",
diff --git a/nova/policy.py b/nova/policy.py
index 55455a9271..c66489cc8d 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -41,11 +41,15 @@ USER_BASED_RESOURCES = ['os-keypairs']
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(gmann): Remove overriding the default value of config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
diff --git a/nova/rpc.py b/nova/rpc.py
index a32b920e06..7a92650414 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -204,11 +204,9 @@ def get_client(target, version_cap=None, serializer=None,
else:
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(TRANSPORT, target,
+ version_cap=version_cap, serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def get_server(target, endpoints, serializer=None):
@@ -436,9 +434,9 @@ class ClientRouter(periodic_task.PeriodicTasks):
transport = context.mq_connection
if transport:
cmt = self.default_client.call_monitor_timeout
- return messaging.RPCClient(transport, self.target,
- version_cap=self.version_cap,
- serializer=self.serializer,
- call_monitor_timeout=cmt)
+ return messaging.get_rpc_client(transport, self.target,
+ version_cap=self.version_cap,
+ serializer=self.serializer,
+ call_monitor_timeout=cmt)
else:
return self.default_client
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 1242752be1..7c14f3d7ef 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -1047,7 +1047,7 @@ class SchedulerReportClient(object):
context: nova_context.RequestContext,
rp_uuid: str,
traits: ty.Iterable[str],
- generation: int = None
+ generation: ty.Optional[int] = None
):
"""Replace a provider's traits with those specified.
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 44f283f7ac..785a13279e 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -16,8 +16,12 @@
"""
Scheduler host filters
"""
+from oslo_log import log as logging
+
from nova import filters
+LOG = logging.getLogger(__name__)
+
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
@@ -28,6 +32,9 @@ class BaseHostFilter(filters.BaseFilter):
# other parameters. We care about running policy filters (i.e.
# ImagePropertiesFilter) but not things that check usage on the
# existing compute node, etc.
+ # This also means that filters marked with RUN_ON_REBUILD = True cannot
+ # filter on allocation candidates or need to handle the rebuild case
+ # specially.
RUN_ON_REBUILD = False
def _filter_one(self, obj, spec):
@@ -50,6 +57,43 @@ class BaseHostFilter(filters.BaseFilter):
raise NotImplementedError()
+class CandidateFilterMixin:
+    """Mixin that helps to implement a Filter that needs to filter hosts by
+    Placement allocation candidates.
+ """
+
+ def filter_candidates(self, host_state, filter_func):
+        """Check the still viable allocation candidates with the filter_func
+        and keep only those that pass it.
+
+ :param host_state: HostState object holding the list of still viable
+ allocation candidates
+ :param filter_func: A callable that takes an allocation candidate and
+            returns a True-like object if the candidate passes the filter or a
+            False-like object if it does not.
+ """
+ good_candidates = []
+ for candidate in host_state.allocation_candidates:
+ LOG.debug(
+ f'{self.__class__.__name__} tries allocation candidate: '
+ f'{candidate}',
+ )
+ if filter_func(candidate):
+ LOG.debug(
+ f'{self.__class__.__name__} accepted allocation '
+ f'candidate: {candidate}',
+ )
+ good_candidates.append(candidate)
+ else:
+ LOG.debug(
+ f'{self.__class__.__name__} rejected allocation '
+ f'candidate: {candidate}',
+ )
+
+ host_state.allocation_candidates = good_candidates
+ return good_candidates
+
+
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
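A hypothetical filter showing how the mixin is meant to be consumed; the predicate here only checks that a candidate carries provider mappings, real filters plug in their own check:

    from nova.scheduler import filters

    class ExampleCandidateFilter(
        filters.BaseHostFilter,
        filters.CandidateFilterMixin,
    ):
        RUN_ON_REBUILD = False

        def host_passes(self, host_state, spec_obj):
            good_candidates = self.filter_candidates(
                host_state,
                lambda candidate: bool(candidate.get('mappings')),
            )
            return bool(good_candidates)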
diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py
index 74d6012f82..ae50db90e5 100644
--- a/nova/scheduler/filters/numa_topology_filter.py
+++ b/nova/scheduler/filters/numa_topology_filter.py
@@ -20,7 +20,10 @@ from nova.virt import hardware
LOG = logging.getLogger(__name__)
-class NUMATopologyFilter(filters.BaseHostFilter):
+class NUMATopologyFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Filter on requested NUMA topology."""
# NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
@@ -97,12 +100,19 @@ class NUMATopologyFilter(filters.BaseHostFilter):
if network_metadata:
limits.network_metadata = network_metadata
- instance_topology = (hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limits,
- pci_requests=pci_requests,
- pci_stats=host_state.pci_stats))
- if not instance_topology:
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limits,
+ pci_requests=pci_requests,
+ pci_stats=host_state.pci_stats,
+ provider_mapping=candidate["mappings"],
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host)s, %(node)s fails NUMA topology "
"requirements. The instance does not fit on this "
"host.", {'host': host_state.host,
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index f08899586a..992879072a 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -20,7 +20,10 @@ from nova.scheduler import filters
LOG = logging.getLogger(__name__)
-class PciPassthroughFilter(filters.BaseHostFilter):
+class PciPassthroughFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Pci Passthrough Filter based on PCI request
Filter that schedules instances on a host if the host has devices
@@ -47,10 +50,24 @@ class PciPassthroughFilter(filters.BaseHostFilter):
pci_requests = spec_obj.pci_requests
if not pci_requests or not pci_requests.requests:
return True
- if (not host_state.pci_stats or
- not host_state.pci_stats.support_requests(pci_requests.requests)):
+
+ if not host_state.pci_stats:
+ LOG.debug("%(host_state)s doesn't have the required PCI devices"
+ " (%(requests)s)",
+ {'host_state': host_state, 'requests': pci_requests})
+ return False
+
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: host_state.pci_stats.support_requests(
+ pci_requests.requests, provider_mapping=candidate["mappings"]
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host_state)s doesn't have the required PCI devices"
" (%(requests)s)",
{'host_state': host_state, 'requests': pci_requests})
return False
+
return True
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 80511ffad6..8cb775a923 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -153,6 +153,8 @@ class HostState(object):
self.updated = None
+ self.allocation_candidates = []
+
def update(self, compute=None, service=None, aggregates=None,
inst_dict=None):
"""Update all information about a host."""
@@ -296,7 +298,9 @@ class HostState(object):
spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
self.numa_topology, spec_obj.numa_topology,
limits=self.limits.get('numa_topology'),
- pci_requests=pci_requests, pci_stats=self.pci_stats)
+ pci_requests=pci_requests,
+ pci_stats=self.pci_stats,
+ provider_mapping=spec_obj.get_request_group_mapping())
self.numa_topology = hardware.numa_usage_from_instance_numa(
self.numa_topology, spec_obj.numa_topology)
@@ -306,7 +310,11 @@ class HostState(object):
instance_cells = None
if spec_obj.numa_topology:
instance_cells = spec_obj.numa_topology.cells
- self.pci_stats.apply_requests(pci_requests, instance_cells)
+ self.pci_stats.apply_requests(
+ pci_requests,
+ spec_obj.get_request_group_mapping(),
+ instance_cells
+ )
# NOTE(sbauza): By considering all cases when the scheduler is called
# and when consume_from_request() is run, we can safely say that there
@@ -314,13 +322,21 @@ class HostState(object):
self.num_io_ops += 1
def __repr__(self):
- return ("(%(host)s, %(node)s) ram: %(free_ram)sMB "
- "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
- "instances: %(num_instances)s" %
- {'host': self.host, 'node': self.nodename,
- 'free_ram': self.free_ram_mb, 'free_disk': self.free_disk_mb,
- 'num_io_ops': self.num_io_ops,
- 'num_instances': self.num_instances})
+ return (
+ "(%(host)s, %(node)s) ram: %(free_ram)sMB "
+ "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
+ "instances: %(num_instances)s, "
+ "allocation_candidates: %(num_a_c)s"
+ % {
+ "host": self.host,
+ "node": self.nodename,
+ "free_ram": self.free_ram_mb,
+ "free_disk": self.free_disk_mb,
+ "num_io_ops": self.num_io_ops,
+ "num_instances": self.num_instances,
+ "num_a_c": len(self.allocation_candidates),
+ }
+ )
class HostManager(object):
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 10b330653d..620519d403 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -20,8 +20,10 @@ Scheduler Service
"""
import collections
+import copy
import random
+from keystoneauth1 import exceptions as ks_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -66,10 +68,42 @@ class SchedulerManager(manager.Manager):
self.host_manager = host_manager.HostManager()
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('scheduler')
- self.placement_client = report.report_client_singleton()
+ self._placement_client = None
+
+ try:
+ # Test our placement client during initialization
+ self.placement_client
+ except (ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure) as e:
+ # Non-fatal, likely transient (although not definitely);
+ # continue startup but log the warning so that when things
+ # fail later, it will be clear why we can not do certain
+ # things.
+ LOG.warning('Unable to initialize placement client (%s); '
+ 'Continuing with startup, but scheduling '
+ 'will not be possible.', e)
+ except (ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized) as e:
+ # This is almost definitely fatal mis-configuration. The
+ # Unauthorized error might be transient, but it is
+ # probably reasonable to consider it fatal.
+ LOG.error('Fatal error initializing placement client; '
+ 'config is incorrect or incomplete: %s', e)
+ raise
+ except Exception as e:
+ # Unknown/unexpected errors here are fatal
+ LOG.error('Fatal error initializing placement client: %s', e)
+ raise
super().__init__(service_name='scheduler', *args, **kwargs)
+ @property
+ def placement_client(self):
+ return report.report_client_singleton()
+
@periodic_task.periodic_task(
spacing=CONF.scheduler.discover_hosts_in_cells_interval,
run_immediately=True)
@@ -299,12 +333,29 @@ class SchedulerManager(manager.Manager):
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
+ def hosts_with_alloc_reqs(hosts_gen):
+ """Extend the HostState objects returned by the generator with
+ the allocation requests of that host
+ """
+ for host in hosts_gen:
+ host.allocation_candidates = copy.deepcopy(
+ alloc_reqs_by_rp_uuid[host.uuid])
+ yield host
+
# Note: remember, we are using a generator-iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(
elevated, spec_obj, provider_summaries)
+        # alloc_reqs_by_rp_uuid is None during rebuild, so this means we
+        # cannot
+ # run filters that are using allocation candidates during rebuild
+ if alloc_reqs_by_rp_uuid is not None:
+ # wrap the generator to extend the HostState objects with the
+ # allocation requests for that given host. This is needed to
+ # support scheduler filters filtering on allocation candidates.
+ hosts = hosts_with_alloc_reqs(hosts)
+
# NOTE(sbauza): The RequestSpec.num_instances field contains the number
# of instances created when the RequestSpec was used to first boot some
# instances. This is incorrect when doing a move or resize operation,
@@ -332,6 +383,13 @@ class SchedulerManager(manager.Manager):
# the older dict format representing HostState objects.
# TODO(stephenfin): Remove this when we bump scheduler the RPC API
# version to 5.0
+ # NOTE(gibi): We cannot remove this branch as it is actively used
+ # when nova calls the scheduler during rebuild (not evacuate) to
+ # check if the current host is still good for the new image used
+ # for the rebuild. In this case placement cannot be used to
+ # generate candidates as that would require space on the current
+ # compute for double allocation. So no allocation candidates for
+ # rebuild and therefore alloc_reqs_by_rp_uuid is None
return self._legacy_find_hosts(
context, num_instances, spec_obj, hosts, num_alts,
instance_uuids=instance_uuids)
@@ -345,6 +403,9 @@ class SchedulerManager(manager.Manager):
# The list of hosts that have been selected (and claimed).
claimed_hosts = []
+ # The allocation request allocated on the given claimed host
+ claimed_alloc_reqs = []
+
for num, instance_uuid in enumerate(instance_uuids):
# In a multi-create request, the first request spec from the list
# is passed to the scheduler and that request spec's instance_uuid
@@ -371,21 +432,20 @@ class SchedulerManager(manager.Manager):
# resource provider UUID
claimed_host = None
for host in hosts:
- cn_uuid = host.uuid
- if cn_uuid not in alloc_reqs_by_rp_uuid:
- msg = ("A host state with uuid = '%s' that did not have a "
- "matching allocation_request was encountered while "
- "scheduling. This host was skipped.")
- LOG.debug(msg, cn_uuid)
+ if not host.allocation_candidates:
+ LOG.debug(
+                        "The nova scheduler removed every allocation candidate "
+ "for host %s so this host was skipped.",
+ host
+ )
continue
- alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid]
# TODO(jaypipes): Loop through all allocation_requests instead
# of just trying the first one. For now, since we'll likely
# want to order the allocation_requests in the future based on
# information in the provider summaries, we'll just try to
# claim resources using the first allocation_request
- alloc_req = alloc_reqs[0]
+ alloc_req = host.allocation_candidates[0]
if utils.claim_resources(
elevated, self.placement_client, spec_obj, instance_uuid,
alloc_req,
@@ -405,6 +465,15 @@ class SchedulerManager(manager.Manager):
claimed_instance_uuids.append(instance_uuid)
claimed_hosts.append(claimed_host)
+ claimed_alloc_reqs.append(alloc_req)
+
+ # update the provider mapping in the request spec based
+ # on the allocated candidate as the _consume_selected_host depends
+                # on this information to temporarily consume PCI devices tracked in
+ # placement
+ for request_group in spec_obj.requested_resources:
+ request_group.provider_uuids = alloc_req[
+ 'mappings'][request_group.requester_id]
# Now consume the resources so the filter/weights will change for
# the next instance.
@@ -416,11 +485,19 @@ class SchedulerManager(manager.Manager):
self._ensure_sufficient_hosts(
context, claimed_hosts, num_instances, claimed_instance_uuids)
- # We have selected and claimed hosts for each instance. Now we need to
- # find alternates for each host.
+ # We have selected and claimed hosts for each instance along with a
+ # claimed allocation request. Now we need to find alternates for each
+ # host.
return self._get_alternate_hosts(
- claimed_hosts, spec_obj, hosts, num, num_alts,
- alloc_reqs_by_rp_uuid, allocation_request_version)
+ claimed_hosts,
+ spec_obj,
+ hosts,
+ num,
+ num_alts,
+ alloc_reqs_by_rp_uuid,
+ allocation_request_version,
+ claimed_alloc_reqs,
+ )
def _ensure_sufficient_hosts(
self, context, hosts, required_count, claimed_uuids=None,
@@ -532,7 +609,21 @@ class SchedulerManager(manager.Manager):
def _get_alternate_hosts(
self, selected_hosts, spec_obj, hosts, index, num_alts,
alloc_reqs_by_rp_uuid=None, allocation_request_version=None,
+ selected_alloc_reqs=None,
):
+ """Generate the main Selection and possible alternate Selection
+ objects for each "instance".
+
+ :param selected_hosts: This is a list of HostState objects. Each
+ HostState represents the main selection for a given instance being
+ scheduled (we can have multiple instances during multi create).
+ :param selected_alloc_reqs: This is a list of allocation requests that
+ are already allocated in placement for the main Selection for each
+            instance. This list matches selected_hosts by index. So
+            for the first instance the selected host is selected_hosts[0] and
+ the already allocated placement candidate is
+ selected_alloc_reqs[0].
+ """
# We only need to filter/weigh the hosts again if we're dealing with
# more than one instance and are going to be picking alternates.
if index > 0 and num_alts > 0:
@@ -546,11 +637,10 @@ class SchedulerManager(manager.Manager):
# representing the selected host along with alternates from the same
# cell.
selections_to_return = []
- for selected_host in selected_hosts:
+ for i, selected_host in enumerate(selected_hosts):
# This is the list of hosts for one particular instance.
if alloc_reqs_by_rp_uuid:
- selected_alloc_req = alloc_reqs_by_rp_uuid.get(
- selected_host.uuid)[0]
+ selected_alloc_req = selected_alloc_reqs[i]
else:
selected_alloc_req = None
@@ -571,15 +661,17 @@ class SchedulerManager(manager.Manager):
if len(selected_plus_alts) >= num_alts + 1:
break
+ # TODO(gibi): In theory we could generate alternatives on the
+ # same host if that host has different possible allocation
+ # candidates for the request. But we don't do that today
if host.cell_uuid == cell_uuid and host not in selected_hosts:
if alloc_reqs_by_rp_uuid is not None:
- alt_uuid = host.uuid
- if alt_uuid not in alloc_reqs_by_rp_uuid:
+ if not host.allocation_candidates:
msg = ("A host state with uuid = '%s' that did "
- "not have a matching allocation_request "
+ "not have any remaining allocation_request "
"was encountered while scheduling. This "
"host was skipped.")
- LOG.debug(msg, alt_uuid)
+ LOG.debug(msg, host.uuid)
continue
# TODO(jaypipes): Loop through all allocation_requests
@@ -588,7 +680,13 @@ class SchedulerManager(manager.Manager):
# the future based on information in the provider
# summaries, we'll just try to claim resources using
# the first allocation_request
- alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0]
+ # NOTE(gibi): we are using, and re-using, allocation
+ # candidates for alternatives here. This is OK as
+ # these candidates are not yet allocated in placement
+ # and we don't know if an alternate will ever be used.
+                        # To increase our success we could try to use a
+                        # different candidate for each alternative though.
+ alloc_req = host.allocation_candidates[0]
alt_selection = objects.Selection.from_host_state(
host, alloc_req, allocation_request_version)
else:
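As a rough illustration of the index pairing described in the new docstring (not part of the patch; the helper and container names below are hypothetical), each main Selection pairs the selected host with the allocation request already claimed for that instance, while alternates reuse their own host's not-yet-claimed allocation candidates:

    def build_selections(selected_hosts, selected_alloc_reqs, alternates_by_host):
        """Sketch: mains pair with claimed requests by index; alternates
        draw from their host's unclaimed allocation candidates."""
        results = []
        for i, host in enumerate(selected_hosts):
            main = (host, selected_alloc_reqs[i])  # already claimed in placement
            alts = [
                (alt, alt.allocation_candidates[0])  # not claimed yet
                for alt in alternates_by_host.get(host, [])
                if alt.allocation_candidates
            ]
            results.append([main] + alts)
        return results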
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index c7e6ffed97..02c44093bd 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -1080,6 +1080,17 @@ _SUPPORTS_SOFT_AFFINITY = None
_SUPPORTS_SOFT_ANTI_AFFINITY = None
+def reset_globals():
+ global _SUPPORTS_AFFINITY
+ _SUPPORTS_AFFINITY = None
+ global _SUPPORTS_ANTI_AFFINITY
+ _SUPPORTS_ANTI_AFFINITY = None
+ global _SUPPORTS_SOFT_AFFINITY
+ _SUPPORTS_SOFT_AFFINITY = None
+ global _SUPPORTS_SOFT_ANTI_AFFINITY
+ _SUPPORTS_SOFT_ANTI_AFFINITY = None
+
+
def _get_group_details(context, instance_uuid, user_group_hosts=None):
"""Provide group_hosts and group_policies sets related to instances if
    those instances belong to a group and if corresponding filters are
diff --git a/nova/scheduler/weights/hypervisor_version.py b/nova/scheduler/weights/hypervisor_version.py
new file mode 100644
index 0000000000..0cd7b0a824
--- /dev/null
+++ b/nova/scheduler/weights/hypervisor_version.py
@@ -0,0 +1,39 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Hypervisor Version Weigher. Weigh hosts by their relative hypervisor version.
+
+The default is to select newer hosts. If you prefer to invert this behavior,
+set the 'hypervisor_version_weight_multiplier' option to a negative number;
+the weighing then has the opposite effect of the default.
+"""
+
+import nova.conf
+from nova.scheduler import utils
+from nova.scheduler import weights
+
+CONF = nova.conf.CONF
+
+
+class HypervisorVersionWeigher(weights.BaseHostWeigher):
+
+ def weight_multiplier(self, host_state):
+ """Override the weight multiplier."""
+ return utils.get_weight_multiplier(
+ host_state, 'hypervisor_version_weight_multiplier',
+ CONF.filter_scheduler.hypervisor_version_weight_multiplier)
+
+ def _weigh_object(self, host_state, weight_properties):
+ """Higher weights win. We want newer hosts by default."""
+ # convert None to 0
+ return host_state.hypervisor_version or 0
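A minimal sketch of the multiplier's effect (the FakeHost class below is hypothetical; the real weigher operates on HostState objects and the weight handler normalizes raw weights across the whole host list):

    class FakeHost:
        def __init__(self, name, hypervisor_version):
            self.name = name
            self.hypervisor_version = hypervisor_version

    def rank(hosts, multiplier=1.0):
        # higher weighted value wins; a negative multiplier prefers older hosts
        return sorted(
            hosts,
            key=lambda h: multiplier * (h.hypervisor_version or 0),
            reverse=True,
        )

    hosts = [FakeHost('older', 6002000), FakeHost('newer', 8000000)]
    assert [h.name for h in rank(hosts)] == ['newer', 'older']
    assert [h.name for h in rank(hosts, -1.0)] == ['older', 'newer']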
diff --git a/nova/service.py b/nova/service.py
index 2c10224926..bd3b49ae66 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -156,11 +156,11 @@ class Service(service.Service):
LOG.info('Starting %(topic)s node (version %(version)s)',
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
- self.manager.init_host()
- self.model_disconnected = False
ctxt = context.get_admin_context()
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
+ self.manager.init_host(self.service_ref)
+ self.model_disconnected = False
if self.service_ref:
_update_service_ref(self.service_ref)
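With this reordering, init_host() receives the already-loaded service record. A minimal sketch of what a manager implementation can now do with it (the ExampleManager name is hypothetical; as the surrounding code shows, the record may not exist yet on a first start, so the argument can be None):

    class ExampleManager(manager.Manager):
        def init_host(self, service_ref):
            # service_ref is the objects.Service row for this host/binary,
            # or None on the very first start before the row is created
            if service_ref is not None:
                LOG.debug('Initializing host for service %s', service_ref.uuid)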
diff --git a/nova/test.py b/nova/test.py
index 689d5ba291..e37967b06d 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -62,6 +62,7 @@ from nova import objects
from nova.objects import base as objects_base
from nova import quota
from nova.scheduler.client import report
+from nova.scheduler import utils as scheduler_utils
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
@@ -171,6 +172,12 @@ class TestCase(base.BaseTestCase):
# base class when USES_DB is True.
NUMBER_OF_CELLS = 1
+    # The stable compute id support is intentionally singleton-ish, which
+    # makes it a nightmare for tests that exercise multiple host/node
+    # combinations like we do. So, mock it out by default unless the test is
+    # specifically designed to handle it.
+ STUB_COMPUTE_ID = True
+
def setUp(self):
"""Run before each test method to initialize test environment."""
# Ensure BaseTestCase's ConfigureLogging fixture is disabled since
@@ -288,6 +295,10 @@ class TestCase(base.BaseTestCase):
self.useFixture(nova_fixtures.GenericPoisonFixture())
self.useFixture(nova_fixtures.SysFsPoisonFixture())
+ # Additional module names can be added to this set if needed
+ self.useFixture(nova_fixtures.ImportModulePoisonFixture(
+ set(['guestfs', 'libvirt'])))
+
# make sure that the wsgi app is fully initialized for all testcase
# instead of only once initialized for test worker
wsgi_app.init_global_data.reset()
@@ -295,6 +306,17 @@ class TestCase(base.BaseTestCase):
# Reset the placement client singleton
report.PLACEMENTCLIENT = None
+ # Reset our local node uuid cache (and avoid writing to the
+ # local filesystem when we generate a new one).
+ if self.STUB_COMPUTE_ID:
+ self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+
+ # Reset globals indicating affinity filter support. Some tests may set
+ # self.flags(enabled_filters=...) which could make the affinity filter
+ # support globals get set to a non-default configuration which affects
+ # all other tests.
+ scheduler_utils.reset_globals()
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -683,6 +705,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
raise NotImplementedError()
def setUp(self):
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
self.base = self._get_base_class()
super(SubclassSignatureTestCase, self).setUp()
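A short sketch of the intended opt-out (the test class name is hypothetical): a test that exercises the stable compute id handling itself can disable the default stubbing and supply its own targeted mocks:

    class NodeIdentityTestCase(test.TestCase):
        # do not install ComputeNodeIdFixture; this test wants to exercise
        # the real node-uuid read/write code paths with its own mocks
        STUB_COMPUTE_ID = False

        def test_identity_handling(self):
            ...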
diff --git a/nova/tests/fixtures/__init__.py b/nova/tests/fixtures/__init__.py
index df254608fd..9ff4a2a601 100644
--- a/nova/tests/fixtures/__init__.py
+++ b/nova/tests/fixtures/__init__.py
@@ -16,6 +16,8 @@ from .cast_as_call import CastAsCallFixture # noqa: F401
from .cinder import CinderFixture # noqa: F401
from .conf import ConfFixture # noqa: F401, F403
from .cyborg import CyborgFixture # noqa: F401
+from .filesystem import SysFileSystemFixture # noqa: F401
+from .filesystem import TempFileSystemFixture # noqa: F401
from .glance import GlanceFixture # noqa: F401
from .libvirt import LibvirtFixture # noqa: F401
from .libvirt_imagebackend import LibvirtImageBackendFixture # noqa: F401
diff --git a/nova/tests/fixtures/filesystem.py b/nova/tests/fixtures/filesystem.py
new file mode 100644
index 0000000000..932d42fe27
--- /dev/null
+++ b/nova/tests/fixtures/filesystem.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import fixtures
+
+from nova import filesystem
+from nova.virt.libvirt.cpu import core
+
+
+SYS = 'sys'
+
+
+class TempFileSystemFixture(fixtures.Fixture):
+ """Creates a fake / filesystem"""
+
+ def _setUp(self):
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
+        # NOTE(sbauza): Disk I/O errors may raise an exception here, as we
+        # don't ignore them. If that causes a problem in our CI jobs, the
+        # recommended solution is to use shutil.rmtree instead of cleanup()
+        # with the ignore_errors parameter set to True (or to wait for the
+        # minimum Python version to become 3.10, as TemporaryDirectory will
+        # provide an ignore_cleanup_errors parameter).
+ self.addCleanup(self.temp_dir.cleanup)
+
+
+class SysFileSystemFixture(TempFileSystemFixture):
+ """Creates a fake /sys filesystem"""
+
+ def __init__(self, cpus_supported=None):
+ self.cpus_supported = cpus_supported or 10
+
+ def _setUp(self):
+ super()._setUp()
+ self.sys_path = os.path.join(self.temp_dir.name, SYS)
+ self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True)
+
+ sys_patcher = mock.patch(
+ 'nova.filesystem.SYS',
+ new_callable=mock.PropertyMock(return_value=self.sys_path))
+ self.sys_mock = sys_patcher.start()
+ self.addCleanup(sys_patcher.stop)
+
+ avail_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/present')))
+ self.avail_path_mock = avail_path_patcher.start()
+ self.addCleanup(avail_path_patcher.stop)
+
+ cpu_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/cpu%(core)s')))
+ self.cpu_path_mock = cpu_path_patcher.start()
+ self.addCleanup(cpu_path_patcher.stop)
+
+ for cpu_nr in range(self.cpus_supported):
+ cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
+ os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+ filesystem.write_sys(
+ os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+ data='powersave')
+ filesystem.write_sys(core.AVAILABLE_PATH,
+ f'0-{self.cpus_supported - 1}')
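A minimal usage sketch for the new fixture (the test class and path check below are hypothetical): the fixture redirects the sysfs paths used by nova.filesystem and the libvirt cpu core helpers into a temporary directory and pre-populates a 'powersave' scaling_governor file for every emulated CPU:

    import os

    from nova import test
    from nova.tests import fixtures as nova_fixtures

    class CpuGovernorTestCase(test.NoDBTestCase):
        def test_fixture_prepopulates_governor(self):
            sysfs = self.useFixture(
                nova_fixtures.SysFileSystemFixture(cpus_supported=4))
            governor = os.path.join(
                sysfs.sys_path,
                'devices/system/cpu/cpu0/cpufreq/scaling_governor')
            with open(governor) as f:
                # written by the fixture for each emulated core
                self.assertIn('powersave', f.read())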
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 5d20e7d54f..4f48463118 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -2044,6 +2044,12 @@ class Connection(object):
return VIR_CPU_COMPARE_IDENTICAL
+ def compareHypervisorCPU(
+ self, emulator, arch, machine, virttype,
+ xml, flags
+ ):
+ return self.compareCPU(xml, flags)
+
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000,
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index ea32b6b34a..4ce3f03710 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -190,6 +190,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
+ # Set the SUPPORTS_LUKS member variable to mimic the Image base
+ # class.
+ image_init.SUPPORTS_LUKS = False
# Ditto for the 'is_shared_block_storage' and
# 'is_file_in_instance_path' functions
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 129b2f9abb..abfc3ecc6c 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -20,8 +20,10 @@ import collections
import contextlib
from contextlib import contextmanager
import functools
+from importlib.abc import MetaPathFinder
import logging as std_logging
import os
+import sys
import time
from unittest import mock
import warnings
@@ -63,6 +65,7 @@ from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
+from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -563,11 +566,10 @@ class CellDatabases(fixtures.Fixture):
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
- return messaging.RPCClient(rpc.TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(rpc.TRANSPORT, target,
+ version_cap=version_cap,
+ serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
@@ -1316,6 +1318,77 @@ class PrivsepFixture(fixtures.Fixture):
nova.privsep.sys_admin_pctxt, 'client_mode', False))
+class CGroupsFixture(fixtures.Fixture):
+ """Mocks checks made for available subsystems on the host's control group.
+
+ The fixture mocks all calls made on the host to verify the capabilities
+    provided by its kernel. Through this, one can simulate the underlying
+    system that hosts run on top of, and have tests react to the expected
+    outcomes of those checks.
+
+    Usage samples:
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ >>> cgroups = self.useFixture(CGroupsFixture(version=2))
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ ... cgroups.version = 2
+
+    :attr version: Arranges the mocks to simulate how the host interacts with
+        nova for the given cgroups version.
+ Available values are:
+ - 0: All checks related to cgroups will return False.
+ - 1: Checks related to cgroups v1 will return True.
+ - 2: Checks related to cgroups v2 will return True.
+ Defaults to 1.
+ """
+
+ def __init__(self, version=1):
+ self._cpuv1 = None
+ self._cpuv2 = None
+
+ self._version = version
+
+ @property
+ def version(self):
+ return self._version
+
+ @version.setter
+ def version(self, value):
+ self._version = value
+ self._update_mocks()
+
+ def setUp(self):
+ super().setUp()
+ self._cpuv1 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv1_cpu_controller')).mock
+ self._cpuv2 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv2_cpu_controller')).mock
+ self._update_mocks()
+
+ def _update_mocks(self):
+ if not self._cpuv1:
+ return
+
+ if not self._cpuv2:
+ return
+
+ if self.version == 0:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 1:
+ self._cpuv1.return_value = True
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 2:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = True
+ return
+
+ raise ValueError(f"Unknown cgroups version: '{self.version}'.")
+
+
class NoopQuotaDriverFixture(fixtures.Fixture):
"""A fixture to run tests using the NoopQuotaDriver.
@@ -1798,3 +1871,70 @@ class SysFsPoisonFixture(fixtures.Fixture):
# a bunch of test to fail
# self.inject_poison("os.path", "exists")
# self.inject_poison("os", "stat")
+
+
+class ImportModulePoisonFixture(fixtures.Fixture):
+ """Poison imports of modules unsuitable for the test environment.
+
+ Examples are guestfs and libvirt. Ordinarily, these would not be installed
+ in the test environment but if they _are_ present, it can result in
+ actual calls to libvirt, for example, which could cause tests to fail.
+
+ This fixture will inspect module imports and if they are in the disallowed
+ list, it will fail the test with a helpful message about mocking needed in
+ the test.
+ """
+
+ class ForbiddenModules(MetaPathFinder):
+ def __init__(self, test, modules):
+ super().__init__()
+ self.test = test
+ self.modules = modules
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.modules:
+ self.test.fail_message = (
+ f"This test imports the '{fullname}' module, which it "
+ f'should not in the test environment. Please add '
+ f'appropriate mocking to this test.'
+ )
+ raise ImportError(fullname)
+
+ def __init__(self, module_names):
+ self.module_names = module_names
+ self.fail_message = ''
+ if isinstance(module_names, str):
+ self.module_names = {module_names}
+ self.meta_path_finder = self.ForbiddenModules(self, self.module_names)
+
+ def setUp(self):
+ super().setUp()
+ self.addCleanup(self.cleanup)
+ sys.meta_path.insert(0, self.meta_path_finder)
+
+ def cleanup(self):
+ sys.meta_path.remove(self.meta_path_finder)
+ # We use a flag and check it during the cleanup phase to fail the test
+ # if needed. This is done because some module imports occur inside of a
+ # try-except block that ignores all exceptions, so raising an exception
+ # there (which is also what self.assert* and self.fail() do underneath)
+ # will not work to cause a failure in the test.
+ if self.fail_message:
+ raise ImportError(self.fail_message)
+
+
+class ComputeNodeIdFixture(fixtures.Fixture):
+ def setUp(self):
+ super().setUp()
+
+ node.LOCAL_NODE_UUID = None
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ lambda: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ lambda uuid: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
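A short sketch of driving the CGroupsFixture from a test (the test class name is hypothetical): the version attribute can be flipped after installation to re-arm the mocked cgroup capability checks:

    class CGroupsVersionSwitchTest(test.NoDBTestCase):
        def test_switch_to_cgroups_v2(self):
            cgroups = self.useFixture(nova_fixtures.CGroupsFixture())  # v1
            # simulate a host that only exposes the cgroups v2 cpu controller
            cgroups.version = 2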
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..486433733d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..3becc83fba
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "hostname": "%(hostname)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
new file mode 100644
index 0000000000..f83c78fdc9
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "%(user_data)s",
+ "networks": "auto",
+ "hostname": "custom-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
new file mode 100644
index 0000000000..ae2088619a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
new file mode 100644
index 0000000000..bc4be64a8e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "new-server-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
new file mode 100644
index 0000000000..2adc16df5e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..f49d21e7a2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/detail?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..9cdb3aa644
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index ab3aa95ad8..15efb39d44 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -80,7 +80,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -97,7 +97,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -119,7 +119,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -135,7 +135,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -163,7 +163,7 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -184,7 +184,7 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host='testHost', request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -211,8 +211,46 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
host=None, request_spec=mock.ANY,
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
pass
+
+
+class EvacuateJsonTestV295(EvacuateJsonTestV268):
+ microversion = '2.95'
+ scenarios = [('v2_95', {'api_major_version': 'v2.1'})]
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index aa07b88247..7679c9b734 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -618,6 +618,13 @@ class ServersSampleJson290Test(ServersSampleJsonTest):
ADMIN_API = False
+class ServersSampleJson294Test(ServersSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ use_common_server_post = False
+ ADMIN_API = False
+
+
class ServersUpdateSampleJsonTest(ServersSampleBase):
# Many of the 'os_compute_api:servers:*' policies are admin-only, and we
@@ -702,6 +709,44 @@ class ServersUpdateSampleJson290Test(ServersUpdateSampleJsonTest):
self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+class ServersUpdateSampleJson294Test(ServersUpdateSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ ADMIN_API = False
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ subs['hostname'] = 'updated-hostname.example.com'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ params = {
+ 'uuid': self.glance.auto_disk_config_enabled_image['id'],
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ 'hostname': 'updated-hostname.example.com',
+ }
+
+ resp = self._do_post(
+ 'servers/%s/action' % uuid,
+ 'server-action-rebuild',
+ params,
+ )
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index fff08697ae..139fb5e6ac 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -248,6 +248,7 @@ class IronicResourceTrackerTest(test.TestCase):
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
+ 'uuid': str(getattr(uuids, nodename)),
}
self.rt.update_available_resource(self.ctx, nodename)
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 0f09d0dcb0..cdf71da0d4 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -598,7 +598,7 @@ class InstanceHelperMixin:
def _evacuate_server(
self, server, extra_post_args=None, expected_host=None,
- expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_state='SHUTOFF', expected_task_state=NOT_SPECIFIED,
expected_migration_status='done'):
"""Evacuate a server."""
api = getattr(self, 'admin_api', self.api)
@@ -876,6 +876,20 @@ class PlacementHelperMixin:
'Test expected a single migration but found %i' % len(migrations))
return migrations[0].uuid
+ def _reserve_placement_resource(self, rp_name, rc_name, reserved):
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ inv = self.placement.get(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26'
+ ).body
+ inv["reserved"] = reserved
+ result = self.placement.put(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26', body=inv
+ ).body
+ self.assertEqual(reserved, result["reserved"])
+ return result
+
class PlacementInstanceHelperMixin(InstanceHelperMixin, PlacementHelperMixin):
"""A placement-aware variant of InstanceHelperMixin."""
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index 47a8bbe81c..1ee46a3217 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -18,6 +18,7 @@ import io
from unittest import mock
import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova.tests import fixtures as nova_fixtures
@@ -42,6 +43,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
@@ -177,7 +179,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
- self.computes[hostname] = _start_compute(hostname, host_info)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ self.computes[hostname] = _start_compute(hostname, host_info)
+ # We need to trigger libvirt.Host() to capture the node-local
+ # uuid while we have it mocked out.
+ self.computes[hostname].driver._host.get_node_uuid()
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 9d3deec99d..0e89a3cdb6 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -415,7 +415,9 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
with mock.patch.object(fakelibvirt.Connection, 'getHostname',
return_value=name):
- compute = self.start_service('compute', host=name)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % name))
+ compute = self.start_service('compute', host=name)
compute.driver._host.get_connection().getHostname = lambda: name
return compute
@@ -427,6 +429,7 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
fake_network.set_stub_network_methods(self)
api_fixture = self.useFixture(
diff --git a/nova/tests/functional/libvirt/test_pci_in_placement.py b/nova/tests/functional/libvirt/test_pci_in_placement.py
index 32f6cfeca7..41d6c8e008 100644
--- a/nova/tests/functional/libvirt/test_pci_in_placement.py
+++ b/nova/tests/functional/libvirt/test_pci_in_placement.py
@@ -13,6 +13,7 @@
# under the License.
from unittest import mock
+import ddt
import fixtures
import os_resource_classes
import os_traits
@@ -73,10 +74,6 @@ class PlacementPCIReportingTests(test_pci_sriov_servers._PCIServersTestBase):
)
)
- @staticmethod
- def _to_device_spec_conf(spec_list):
- return [jsonutils.dumps(x) for x in spec_list]
-
class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
@@ -91,7 +88,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=2, num_pfs=2, num_vfs=4)
# the emulated devices will then be filtered by the device_spec:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PCI_PROD_ID will match two type-PCI devs (slot 0, 1)
{
@@ -168,7 +165,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=1, num_vfs=1)
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PCI_PROD_ID will match the type-PCI in slot 0
{
@@ -215,7 +212,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
        # both devices will be matched by our config
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# PF
{
@@ -248,7 +245,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches the two VFs separately and tries to configure
# them with different resource class
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.1",
@@ -282,7 +279,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches the two VFs separately and tries to configure
# them with different trait list
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"address": "0000:81:00.1",
@@ -316,7 +313,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# then the config assigns physnet to the dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -336,7 +333,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
)
def test_devname_based_dev_spec_rejected(self):
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"devname": "eth0",
@@ -364,7 +361,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches that PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -386,7 +383,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
)
# now un-configure the PCI device and restart the compute
- self.flags(group='pci', device_spec=self._to_device_spec_conf([]))
+ self.flags(group='pci', device_spec=self._to_list_of_json_str([]))
self.restart_compute_service(hostname="compute1")
# the RP had no allocation so nova could remove it
@@ -402,7 +399,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config matching the VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -450,7 +447,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
        # then the config matches the VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -493,7 +490,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config matches both VFs
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -516,7 +513,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# change the config to match the PF but do not match the VFs and
# restart the compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -548,7 +545,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# then the config only matches the PF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -571,7 +568,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# remove the PF from the config and add the VFs instead then restart
# the compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -604,7 +601,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
num_pci=0, num_pfs=2, num_vfs=4)
# from slot 0 we match the PF only and ignore the VFs
# from slot 1 we match the VFs but ignore the parent PF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -649,7 +646,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# change the resource class and traits configuration and restart the
# compute
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"product_id": fakelibvirt.PF_PROD_ID,
@@ -702,7 +699,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# we match the PF only and ignore the VF
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -756,7 +753,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
self._create_one_compute_with_a_pf_consumed_by_an_instance())
# remove 0000:81:00.0 from the device spec and restart the compute
- device_spec = self._to_device_spec_conf([])
+ device_spec = self._to_list_of_json_str([])
self.flags(group='pci', device_spec=device_spec)
# The PF is used but removed from the config. The PciTracker warns
# but keeps the device so the placement logic mimic this and only warns
@@ -800,7 +797,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
# in the config, then restart the compute service
# only match the VF now
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -875,7 +872,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -902,7 +899,7 @@ class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -953,25 +950,13 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
)
)
- @staticmethod
- def _move_allocation(allocations, from_uuid, to_uuid):
- allocations[to_uuid] = allocations[from_uuid]
- del allocations[from_uuid]
-
- def _move_server_allocation(self, allocations, server_uuid, revert=False):
- migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
- if revert:
- self._move_allocation(allocations, migration_uuid, server_uuid)
- else:
- self._move_allocation(allocations, server_uuid, migration_uuid)
-
def test_heal_single_pci_allocation(self):
# The fake libvirt will emulate on the host:
# * one type-PCI in slot 0
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=1, num_pfs=0, num_vfs=0)
# the config matches the PCI dev
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1027,7 +1012,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=2, num_vfs=8)
# the config matches:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# both type-PCI
{
@@ -1127,7 +1112,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=2, num_vfs=8)
# the config matches:
- device_spec = self._to_device_spec_conf(
+ device_spec = self._to_list_of_json_str(
[
# both type-PCI
{
@@ -1216,7 +1201,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=2)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1273,7 +1258,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# the config matches just the VFs
- compute2_device_spec = self._to_device_spec_conf(
+ compute2_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1377,7 +1362,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=1)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1435,7 +1420,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=2, num_pfs=1, num_vfs=1)
        # the config matches the PCI devs and the PF but not the VFs
- compute2_device_spec = self._to_device_spec_conf(
+ compute2_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1516,7 +1501,7 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=1, num_vfs=3)
# the config matches just the VFs
- compute1_device_spec = self._to_device_spec_conf(
+ compute1_device_spec = self._to_list_of_json_str(
[
{
"vendor_id": fakelibvirt.PCI_VEND_ID,
@@ -1613,8 +1598,400 @@ class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
compute1_expected_placement_view["allocations"][server["id"]] = {
"0000:81:00.0": {self.VF_RC: 2}
}
+        # NOTE(gibi): This is unfortunate, but during a same-host resize
+        # confirm, when PCI scheduling is not enabled, the healing logic
+        # cannot heal the dest host allocation during the claim. It will only
+        # heal it in the next run of ResourceTracker._update(). This is due
+        # to the fact that ResourceTracker.drop_move_claim runs both for
+        # revert (on the dest) and confirm (on the source), and in a
+        # same-host resize this means that it runs on both the source and
+        # the dest as they are the same.
+        # Anyhow, the healing will happen just a bit later. And the end goal
+        # is to make the scheduler support enabled by default and delete the
+        # whole healing logic. So I think this is acceptable.
+ self._run_periodics()
self.assert_placement_pci_view(
"compute1", **compute1_expected_placement_view)
- self._run_periodics()
+
+
+@ddt.ddt
+class SimpleRCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "gpu",
+ "traits": ",".join(
+ [
+ os_traits.HW_GPU_API_VULKAN,
+ "purple",
+ "round",
+ ]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_GPU": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_PURPLE",
+ "CUSTOM_ROUND",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_GPU": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view)
+
+ @ddt.data(
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-gpu-wrong-rc",
+ },
+ {
+ "resource_class": os_resource_classes.PGPU,
+ "name": "a-gpu-wrong-rc-2",
+ },
+ {
+ "resource_class": "GPU",
+ # NOTE(gibi): "big" is missing from device spec
+ "traits": "purple,big",
+ "name": "a-gpu-missing-trait",
+ },
+ )
+ def test_boot_with_custom_rc_and_traits_no_matching_device(
+ self, pci_alias
+ ):
+ self.flags(group="pci", alias=self._to_list_of_json_str([pci_alias]))
+ extra_spec = {"pci_passthrough:alias": f"{pci_alias['name']}:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state="ERROR"
+ )
+ self.assertIn("fault", server)
+ self.assertIn("No valid host", server["fault"]["message"])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+
+ def test_boot_with_custom_rc_and_traits_succeeds(self):
+ pci_alias_gpu = {
+ "resource_class": "GPU",
+ "traits": "HW_GPU_API_VULKAN,PURPLE",
+ "name": "a-gpu",
+ }
+ self.flags(
+ group="pci", alias=self._to_list_of_json_str([pci_alias_gpu])
+ )
+
+ extra_spec = {"pci_passthrough:alias": "a-gpu:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ self.compute1_expected_placement_view["usages"]["0000:81:00.0"][
+ "CUSTOM_GPU"
+ ] = 1
+ self.compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {"CUSTOM_GPU": 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+ self.assert_no_pci_healing("compute1")
+
+
+class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_device_claim_consistent_with_placement_allocation(self):
+ """As soon as [filter_scheduler]pci_in_placement is enabled the
+ nova-scheduler will allocate PCI devices in placement. Then on the
+ nova-compute side the PCI claim will also allocate PCI devices in the
+        nova DB. This test creates a situation where the two allocations
+        could contradict each other and observes that in such a situation the
+        PCI claim fails instead of allocating a device that is not allocated
+        in placement.
+
+        For the contradiction to happen we need two PCI devices that look
+        different from the placement perspective than from the nova DB
+        perspective.
+
+        We can do that by assigning different traits in placement and having
+        different product_ids in the nova DB. Then we create a request that
+        matches only one of the devices from the placement perspective and
+        only the other device from the nova DB perspective. We then expect
+        that the boot request fails with no valid host.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ # * one type-PF in slot 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=0)
+        # we allow both devices to be consumed, but we assign different traits
+ # so we can selectively schedule to one of the devices in placement
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PCI",
+ },
+ {
+ "address": "0000:81:01.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PF",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 1},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_A_PCI",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_A_PF",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 0},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+        # now we create a PCI alias that cannot be fulfilled from both the
+        # nova and the placement perspective at the same time, but can be
+        # fulfilled from each perspective individually
+ pci_alias_no_match = {
+ "resource_class": "MY_DEV",
+ # by product_id this matches 81.00 only
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ # by trait this matches 81.01 only
+ "traits": "A_PF",
+ "name": "a-pci",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_no_match]),
+ )
+
+ # then try to boot with the alias and expect no valid host error
+ extra_spec = {"pci_passthrough:alias": "a-pci:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_vf_with_split_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # reserve VFs from 81.01 in placement to drive the first instance to
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 2)
+ # boot an instance with a single VF
+        # we expect that it is allocated from 81.00 as both VFs on 81.01 are
+        # reserved
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_1vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=3)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1}
+ }
+ compute1_expected_placement_view["allocations"][server_1vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ # Boot a second instance requesting two VFs and ensure that the only
+ # way that placement allows this is to split the two VFs between PFs.
+ # Let's remove the reservation of one resource from 81.01 so the only
+ # viable placement candidate is: one VF from 81.00 and one VF from
+ # 81.01
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 1)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+            # both VMs use one VF from 81.00
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ compute1_expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_3vfs_asymmetric_split_between_pfs(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # Boot an instance requesting three VFs. The 3 VFs can be split between
+ # the two PFs two ways: 2 from 81.00 and 1 from 81.01, or 1 from 81.00
+ # and 2 from 81.01.
+ # Let's block the first way in placement by reserving 1 device from
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:00.0", "CUSTOM_MY_VF", 1)
+ extra_spec = {"pci_passthrough:alias": "a-vf:3"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # We expect this to fit.
+ server_3vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ compute1_expected_placement_view["allocations"][server_3vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
self.assert_placement_pci_view(
"compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index b32d165e10..098a0e857b 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -14,6 +14,8 @@
# under the License.
import copy
+import pprint
+import typing as ty
from unittest import mock
from urllib import parse as urlparse
@@ -27,6 +29,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
import nova
+from nova.compute import pci_placement_translator
from nova import context
from nova import exception
from nova.network import constants
@@ -42,10 +45,58 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+class PciPlacementHealingFixture(fixtures.Fixture):
+ """Allow asserting if the pci_placement_translator module needed to
+ heal PCI allocations. Such healing is only normal during upgrade. After
+ every compute is upgraded and the scheduling support of PCI tracking in
+ placement is enabled there should be no need to heal PCI allocations in
+ the resource tracker. We assert this as we eventually want to remove the
+ automatic healing logic from the resource tracker.
+ """
+
+ def __init__(self):
+ super().__init__()
+ # a list of (nodename, result, allocation_before, allocation_after)
+        # tuples recording the result of the calls to
+ # update_provider_tree_for_pci
+ self.calls = []
+
+ def setUp(self):
+ super().setUp()
+
+ orig = pci_placement_translator.update_provider_tree_for_pci
+
+ def wrapped_update(
+ provider_tree, nodename, pci_tracker, allocations, same_host
+ ):
+ alloc_before = copy.deepcopy(allocations)
+ updated = orig(
+ provider_tree, nodename, pci_tracker, allocations, same_host)
+ alloc_after = copy.deepcopy(allocations)
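+            # record whether the translator reported that it changed (i.e.
+            # healed) the allocations, together with the before/after
+            # snapshots; last_healing() below relies on this flag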
+ self.calls.append((nodename, updated, alloc_before, alloc_after))
+ return updated
+
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ "nova.compute.pci_placement_translator."
+ "update_provider_tree_for_pci",
+ wrapped_update,
+ )
+ )
+
+ def last_healing(self, hostname: str) -> ty.Optional[ty.Tuple[dict, dict]]:
+ for h, updated, before, after in self.calls:
+ if h == hostname and updated:
+ return before, after
+ return None
+
+
class _PCIServersTestBase(base.ServersTestBase):
ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
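+    # the resource class used in placement for these PCI devices when the
+    # device_spec does not set a custom resource_class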
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+
def setUp(self):
self.ctxt = context.get_admin_context()
self.flags(
@@ -66,6 +117,9 @@ class _PCIServersTestBase(base.ServersTestBase):
'.PciPassthroughFilter.host_passes',
side_effect=host_pass_mock)).mock
+ self.pci_healing_fixture = self.useFixture(
+ PciPlacementHealingFixture())
+
def assertPCIDeviceCounts(self, hostname, total, free):
"""Ensure $hostname has $total devices, $free of which are free."""
devices = objects.PciDeviceList.get_by_compute_node(
@@ -75,21 +129,31 @@ class _PCIServersTestBase(base.ServersTestBase):
self.assertEqual(total, len(devices))
self.assertEqual(free, len([d for d in devices if d.is_available()]))
+ def assert_no_pci_healing(self, hostname):
+ last_healing = self.pci_healing_fixture.last_healing(hostname)
+ before = last_healing[0] if last_healing else None
+ after = last_healing[1] if last_healing else None
+ self.assertIsNone(
+ last_healing,
+ "The resource tracker needed to heal PCI allocation in placement "
+ "on host %s. This should not happen in normal operation as the "
+ "scheduler should create the proper allocation instead.\n"
+ "Allocations before healing:\n %s\n"
+ "Allocations after healing:\n %s\n"
+ % (
+ hostname,
+ pprint.pformat(before),
+ pprint.pformat(after),
+ ),
+ )
+
def _get_rp_by_name(self, name, rps):
for rp in rps:
if rp["name"] == name:
return rp
self.fail(f'RP {name} is not found in Placement {rps}')
- def assert_placement_pci_view(
- self, hostname, inventories, traits, usages=None, allocations=None
- ):
- if not usages:
- usages = {}
-
- if not allocations:
- allocations = {}
-
+ def assert_placement_pci_inventory(self, hostname, inventories, traits):
compute_rp_uuid = self.compute_rp_uuids[hostname]
rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
@@ -129,6 +193,10 @@ class _PCIServersTestBase(base.ServersTestBase):
f"Traits on RP {real_rp_name} does not match with expectation"
)
+ def assert_placement_pci_usages(self, hostname, usages):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
for rp_name, usage in usages.items():
real_rp_name = f'{hostname}_{rp_name}'
rp = self._get_rp_by_name(real_rp_name, rps)
@@ -139,6 +207,38 @@ class _PCIServersTestBase(base.ServersTestBase):
f"Usage on RP {real_rp_name} does not match with expectation"
)
+ def assert_placement_pci_allocations(self, allocations):
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ len(actual_allocations),
+ f"The consumer {consumer} allocates from different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ self.assertIn(
+ rp_uuid,
+ actual_allocations,
+ f"The consumer {consumer} expected to allocate from "
+ f"{rp_name}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp_uuid]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+ f"The consumer {consumer} expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_allocations_on_host(self, hostname, allocations):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
for consumer, expected_allocations in allocations.items():
actual_allocations = self._get_allocations_by_server_uuid(consumer)
self.assertEqual(
@@ -170,6 +270,35 @@ class _PCIServersTestBase(base.ServersTestBase):
f"{actual_rp_allocs} instead."
)
+ def assert_placement_pci_view(
+ self, hostname, inventories, traits, usages=None, allocations=None
+ ):
+ if not usages:
+ usages = {}
+
+ if not allocations:
+ allocations = {}
+
+ self.assert_placement_pci_inventory(hostname, inventories, traits)
+ self.assert_placement_pci_usages(hostname, usages)
+ self.assert_placement_pci_allocations_on_host(hostname, allocations)
+
+ @staticmethod
+    def _to_list_of_json_str(lst):
+        return [jsonutils.dumps(x) for x in lst]
+
+ @staticmethod
+ def _move_allocation(allocations, from_uuid, to_uuid):
+ allocations[to_uuid] = allocations[from_uuid]
+ del allocations[from_uuid]
+
+ def _move_server_allocation(self, allocations, server_uuid, revert=False):
+ migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
+ if revert:
+ self._move_allocation(allocations, migration_uuid, server_uuid)
+ else:
+ self._move_allocation(allocations, server_uuid, migration_uuid)
+
class _PCIServersWithMigrationTestBase(_PCIServersTestBase):
@@ -809,7 +938,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# start two compute services with differing PCI device inventory
source_pci_info = fakelibvirt.HostPCIDevicesInfo(
- num_pfs=2, num_vfs=8, numa_node=0)
+ num_pfs=1, num_vfs=4, numa_node=0)
# add an extra PF without VF to be used by direct-physical ports
source_pci_info.add_device(
dev_type='PF',
@@ -862,7 +991,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# our source host should have marked two PCI devices as used, the VF
# and the parent PF, while the future destination is currently unused
self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
- self.assertPCIDeviceCounts('test_compute0', total=11, free=8)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=3)
self.assertPCIDeviceCounts('test_compute1', total=4, free=4)
# the instance should be on host NUMA node 0, since that's where our
@@ -886,7 +1015,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# TODO(stephenfin): Stop relying on a side-effect of how nova
# chooses from multiple PCI devices (apparently the last
# matching one)
- 'pci_slot': '0000:81:01.4',
+ 'pci_slot': '0000:81:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
@@ -910,7 +1039,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# we should now have transitioned our usage to the destination, freeing
# up the source in the process
- self.assertPCIDeviceCounts('test_compute0', total=11, free=11)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=6)
self.assertPCIDeviceCounts('test_compute1', total=4, free=1)
# the instance should now be on host NUMA node 1, since that's where
@@ -1420,7 +1549,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
'not supported for instance with vDPA ports',
ex.response.text)
+    # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+    # need to support this test
def test_attach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=61
@@ -1449,7 +1582,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertEqual(hostname, port['binding:host_id'])
self.assertEqual(server['id'], port['device_id'])
+    # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+    # need to support this test
def test_detach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=61
@@ -1483,10 +1620,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
# to any host but should still be owned by the vm
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertEqual(server['id'], port['device_id'])
- # FIXME(sean-k-mooney): we should be unbinding the port from
- # the host when we shelve offload but we don't today.
- # This is unrelated to vdpa port and is a general issue.
- self.assertEqual(hostname, port['binding:host_id'])
+ self.assertIsNone(port['binding:host_id'])
self.assertIn('binding:profile', port)
self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
@@ -1508,9 +1642,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
port = self.neutron.show_port(vdpa_port['id'])['port']
- # FIXME(sean-k-mooney): shelve offload should unbind the port
- # self.assertEqual('', port['binding:host_id'])
- self.assertEqual(hostname, port['binding:host_id'])
+ self.assertIsNone(port['binding:host_id'])
server = self._unshelve_server(server)
self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
@@ -1541,9 +1673,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
port = self.neutron.show_port(vdpa_port['id'])['port']
- # FIXME(sean-k-mooney): shelve should unbind the port
- # self.assertEqual('', port['binding:host_id'])
- self.assertEqual(source, port['binding:host_id'])
+ self.assertIsNone(port['binding:host_id'])
# force the unshelve to the other host
self.api.put_service(
@@ -1742,7 +1872,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertEqual(
dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+    # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+    # need to support this test
def test_suspend_and_resume_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=62
@@ -1761,7 +1895,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
self.assertEqual('ACTIVE', server['status'])
+    # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+    # need to support this test
def test_live_migrate_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=62
@@ -1819,15 +1957,16 @@ class PCIServersTest(_PCIServersTestBase):
'name': ALIAS_NAME,
}
)]
- PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
def setUp(self):
super().setUp()
self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
def test_create_server_with_pci_dev_and_numa(self):
"""Verifies that an instance can be booted with cpu pinning and with an
- assigned pci device.
+        assigned pci device using the legacy PCI NUMA policy, when the pci
+        device reports numa info.
"""
self.flags(cpu_dedicated_set='0-7', group='compute')
@@ -1839,6 +1978,7 @@ class PCIServersTest(_PCIServersTestBase):
"compute1",
inventories={"0000:81:00.0": {self.PCI_RC: 1}},
traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 0}},
)
# create a flavor
@@ -1848,23 +1988,34 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(flavor_id=flavor_id, networks='none')
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 1}},
+ allocations={server['id']: {"0000:81:00.0": {self.PCI_RC: 1}}},
+ )
+ self.assert_no_pci_healing("compute1")
def test_create_server_with_pci_dev_and_numa_fails(self):
"""This test ensures that it is not possible to allocated CPU and
- memory resources from one NUMA node and a PCI device from another.
+ memory resources from one NUMA node and a PCI device from another
+ if we use the legacy policy and the pci device reports numa info.
"""
-
self.flags(cpu_dedicated_set='0-7', group='compute')
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
self.assert_placement_pci_view(
- "compute1",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {'hw:cpu_policy': 'dedicated'}
@@ -1877,6 +2028,10 @@ class PCIServersTest(_PCIServersTestBase):
self._create_server(
flavor_id=flavor_id, networks='none', expected_state='ERROR')
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
def test_live_migrate_server_with_pci(self):
"""Live migrate an instance with a PCI passthrough device.
@@ -1889,26 +2044,41 @@ class PCIServersTest(_PCIServersTestBase):
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
self.assert_placement_pci_view(
- "test_compute0",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "test_compute0", **test_compute0_placement_pci_view)
self.start_compute(
hostname='test_compute1',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
self.assert_placement_pci_view(
- "test_compute1",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "test_compute1", **test_compute1_placement_pci_view)
# create a server
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute0")
+
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now live migrate that server
ex = self.assertRaises(
@@ -1920,29 +2090,51 @@ class PCIServersTest(_PCIServersTestBase):
# this will bubble to the API
self.assertEqual(500, ex.response.status_code)
self.assertIn('NoValidHost', str(ex))
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_resize_pci_to_vanilla(self):
# Start two computes, one with PCI and one without.
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
self.assert_placement_pci_view(
- "test_compute0",
- inventories={"0000:81:00.0": {self.PCI_RC: 1}},
- traits={"0000:81:00.0": []},
- )
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
self.assert_placement_pci_view(
- "test_compute1",
- inventories={},
- traits={},
- )
+ "test_compute1", **test_compute1_placement_pci_view)
# Boot a server with a single PCI device.
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# Resize it to a flavor without PCI devices. We expect this to work, as
# test_compute1 is available.
flavor_id = self._create_flavor()
@@ -1955,6 +2147,343 @@ class PCIServersTest(_PCIServersTestBase):
self._confirm_resize(server)
self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_vanilla_to_pci(self):
+ """Resize an instance from a non PCI flavor to a PCI flavor"""
+ # Start two computes, one with PCI and one without.
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+        # Boot a server without a PCI device and make sure it lands on the
+        # compute that has no device, so we can later resize it to the other
+        # host, which has a PCI device.
+ extra_spec = {}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute1")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+        # Resize it to a flavor with a PCI device. We expect this to work, as
+        # test_compute0 is available and has a free PCI device.
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_from_one_dev_to_two(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=2),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize the server to a flavor requesting two devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+        # on the source host the PCI allocation is now held by the migration
+ self._move_server_allocation(
+ test_compute0_placement_pci_view['allocations'], server['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+        # on the dest we now have two devices allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now revert the resize
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+        # on the source host the allocation moves back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # so the dest should be freed
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ del test_compute1_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now resize again and confirm it
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+        # the source host now needs to be freed up
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # and dest allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_same_host_resize_with_pci(self):
+        """Start a single compute with 3 PCI devs and resize an instance
+        from one dev to two devs
+ """
+ self.flags(allow_resize_to_same_host=True)
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # Boot a server with a single PCI device.
+ # To stabilize the test we reserve 81.01 and 81.02 in placement so
+ # we can be sure that the instance will use 81.00, otherwise the
+ # allocation will be random between 00, 01, and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 1)
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # remove the reservations, so we can resize on the same host and
+ # consume 01 and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 0)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 0)
+
+ # Resize the server to use 2 PCI devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=0)
+ # the source host side of the allocation is now held by the migration
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server['id'])
+ # but we have the dest host side of the allocations on the same host
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+        # revert the resize so the instance goes back to using a single
+        # device
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ # the migration allocation is moved back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ # and the "dest" side of the allocation is dropped
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize again but now confirm the same host resize and assert that
+ # only the new flavor usage remains
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+        test_compute0_placement_pci_view["allocations"][server['id']] = {
+            "0000:81:01.0": {self.PCI_RC: 1},
+            "0000:81:02.0": {self.PCI_RC: 1},
+        }
+        self.assert_placement_pci_view(
+            "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
def _confirm_resize(self, server, host='host1'):
# NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
@@ -1969,7 +2498,6 @@ class PCIServersTest(_PCIServersTestBase):
self.flags(host=orig_host)
def test_cold_migrate_server_with_pci(self):
-
host_devices = {}
orig_create = nova.virt.libvirt.guest.Guest.create
@@ -1998,17 +2526,41 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
self.start_compute(hostname=hostname, pci_info=pci_info)
- self.assert_placement_pci_view(
- hostname,
- inventories={
- "0000:81:00.0": {self.PCI_RC: 1},
- "0000:81:01.0": {self.PCI_RC: 1},
- },
- traits={
- "0000:81:00.0": [],
- "0000:81:01.0": [],
- },
- )
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# boot an instance with a PCI device on each host
extra_spec = {
@@ -2016,8 +2568,16 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # force the allocation on test_compute0 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
server_a = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute0')
+ # force the allocation on test_compute1 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 1)
server_b = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute1')
@@ -2029,6 +2589,25 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
self.assertPCIDeviceCounts(hostname, total=2, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_b['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # remove the resource reservation from test_compute1 to be able to
+ # migrate server_a there
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 0)
+
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
@@ -2046,13 +2625,41 @@ class PCIServersTest(_PCIServersTestBase):
server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
)
self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
+ # on the source host the allocation is now held by the migration UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server_a['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+        # server_a now has an allocation on test_compute1 on 81:01
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:01.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:01.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now, confirm the migration and check our counts once again
self._confirm_resize(server_a)
self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
+ # the source host now has no allocations as the migration allocation
+ # is removed by confirm resize
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_request_two_pci_but_host_has_one(self):
# simulate a single type-PCI device on the host
@@ -2083,6 +2690,320 @@ class PCIServersTest(_PCIServersTestBase):
self.assertIn('fault', server)
self.assertIn('No valid host', server['fault']['message'])
+ def _create_two_computes(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ return (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def _create_two_computes_and_an_instance_on_the_first(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ return (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def test_evacuate(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # kill test_compute0 and evacuate the instance
+ self.computes['test_compute0'].stop()
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"forced_down": True},
+ )
+ self._evacuate_server(server)
+        # the source allocation should be kept as the source is dead, but the
+        # server now has allocations on both hosts as evacuation does not use
+        # migration allocations.
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assert_placement_pci_inventory(
+ "test_compute0",
+ test_compute0_placement_pci_view["inventories"],
+ test_compute0_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute0", test_compute0_placement_pci_view["usages"]
+ )
+ self.assert_placement_pci_allocations(
+ {
+ server['id']: {
+ "test_compute0": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute0_0000:81:00.0": {self.PCI_RC: 1},
+ "test_compute1": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute1_0000:81:00.0": {self.PCI_RC: 1},
+ },
+ }
+ )
+
+ # dest allocation should be created
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_inventory(
+ "test_compute1",
+ test_compute1_placement_pci_view["inventories"],
+ test_compute1_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+            "test_compute1", test_compute1_placement_pci_view["usages"]
+ )
+
+ # recover test_compute0 and check that it is cleaned
+ self.restart_compute_service('test_compute0')
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+        # and test_compute1 is not changed (except that the instance now has
+        # allocations only on this compute)
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_unshelve_after_offload(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # shelve offload the server
+ self._shelve_server(server)
+
+ # source allocation should be freed
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should not be touched
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # disable test_compute0 and unshelve the instance
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"status": "disabled"},
+ )
+ self._unshelve_server(server)
+
+ # test_compute0 should be unchanged
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should be allocated
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_reschedule(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # try to boot a VM with a single device but inject fault on the first
+ # compute so that the VM is re-scheduled to the other
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+
+ calls = []
+ orig_guest_create = (
+ nova.virt.libvirt.driver.LibvirtDriver._create_guest)
+
+ def fake_guest_create(*args, **kwargs):
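+            # fail only the very first guest creation with a libvirt error
+            # so that the instance is rescheduled; later calls fall through
+            # to the real driver implementation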
+ if not calls:
+ calls.append(1)
+ raise fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ )
+ else:
+ return orig_guest_create(*args, **kwargs)
+
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._create_guest',
+ new=fake_guest_create
+ ):
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none')
+
+ compute_pci_view_map = {
+ 'test_compute0': test_compute0_placement_pci_view,
+ 'test_compute1': test_compute1_placement_pci_view,
+ }
+ allocated_compute = server['OS-EXT-SRV-ATTR:host']
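+        # during a resize or cold migration the source host allocation is
+        # held by the migration UUID; revert=True moves it back to the
+        # instance UUID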
+ not_allocated_compute = (
+ "test_compute0"
+ if allocated_compute == "test_compute1"
+ else "test_compute1"
+ )
+
+ allocated_pci_view = compute_pci_view_map.pop(
+ server['OS-EXT-SRV-ATTR:host'])
+ not_allocated_pci_view = list(compute_pci_view_map.values())[0]
+
+ self.assertPCIDeviceCounts(allocated_compute, total=1, free=0)
+ allocated_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ allocated_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(allocated_compute, **allocated_pci_view)
+
+ self.assertPCIDeviceCounts(not_allocated_compute, total=1, free=1)
+ self.assert_placement_pci_view(
+ not_allocated_compute, **not_allocated_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_multi_create(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ body = self._build_server(flavor_id=pci_flavor_id, networks='none')
+ body.update(
+ {
+ "min_count": "2",
+ }
+ )
+ self.api.post_server({'server': body})
+
+ servers = self.api.get_servers(detail=False)
+ for server in servers:
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ self.assertEqual(2, len(servers))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ # we have no way to influence which instance takes which device, so
+ # we need to look at the nova DB to properly assert the placement
+ # allocation
+ devices = objects.PciDeviceList.get_by_compute_node(
+ self.ctxt,
+ objects.ComputeNode.get_by_nodename(self.ctxt, 'test_compute0').id,
+ )
+ for dev in devices:
+ if dev.instance_uuid:
+ test_compute0_placement_pci_view["usages"][
+ dev.address][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ dev.instance_uuid] = {dev.address: {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
@@ -2104,6 +3025,11 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
)]
expected_state = 'ACTIVE'
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Validate behavior of 'preferred' PCI NUMA policy.
@@ -2116,6 +3042,20 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {
@@ -2124,13 +3064,26 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
self._create_server(flavor_id=flavor_id)
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# now boot one with a PCI device, which should succeed thanks to the
# use of the PCI policy
extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(
+ server_with_pci = self._create_server(
flavor_id=flavor_id, expected_state=self.expected_state)
+ if self.expected_state == 'ACTIVE':
+ compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ compute1_placement_pci_view["allocations"][
+ server_with_pci['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
@@ -2146,6 +3099,99 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
)]
expected_state = 'ERROR'
+ def setUp(self):
+ super().setUp()
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.is_physical_function', return_value=False
+ )
+ )
+
+ def test_create_server_with_pci_dev_and_numa_placement_conflict(self):
+ # fakelibvirt will simulate the devices:
+ # * one type-PCI in 81.00 on numa 0
+ # * one type-PCI in 81.01 on numa 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the device_spec will assign different traits to 81.00 than 81.01
+ # so the two devices become different from placement perspective
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": "green",
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:01.0",
+ "traits": "red",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # both numa 0 and numa 1 have 4 PCPUs
+ self.flags(cpu_dedicated_set='0-7', group='compute')
+ self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": ["CUSTOM_GREEN"],
+ "0000:81:01.0": ["CUSTOM_RED"],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
+ # boot one instance with no PCI device to "fill up" NUMA node 0
+ # so we will have PCPUs on numa 0 and we have PCI on both nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ }
+ flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+
+ pci_alias = {
+ "resource_class": self.PCI_RC,
+ # this means only 81.00, which is on numa 0, will match in placement
+ "traits": "green",
+ "name": "pci-dev",
+ # this forces the scheduler to only accept a solution where the
+ # PCI device is on the same numa node as the pinned CPUs
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias]),
+ )
+
+ # Ask for dedicated CPUs, that can only be fulfilled on numa 1.
+ # And ask for a PCI alias that can only be fulfilled on numa 0 due to
+ # trait request.
+ # We expect that this makes the scheduling fail.
+ extra_spec = {
+ "hw:cpu_policy": "dedicated",
+ "pci_passthrough:alias": "pci-dev:1",
+ }
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, expected_state="ERROR")
+
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
@ddt.ddt
class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
@@ -2889,17 +3935,7 @@ class RemoteManagedServersTest(_PCIServersWithMigrationTestBase):
port = self.neutron.show_port(uuids.dpu_tunnel_port)['port']
self.assertIn('binding:profile', port)
- self.assertEqual(
- {
- 'pci_vendor_info': '15b3:101e',
- 'pci_slot': '0000:82:00.4',
- 'physical_network': None,
- 'pf_mac_address': '52:54:00:1e:59:02',
- 'vf_num': 3,
- 'card_serial_number': 'MT0000X00002',
- },
- port['binding:profile'],
- )
+ self.assertEqual({}, port['binding:profile'])
def test_suspend(self):
self.start_compute()
diff --git a/nova/tests/functional/libvirt/test_power_manage.py b/nova/tests/functional/libvirt/test_power_manage.py
new file mode 100644
index 0000000000..9f80446bd6
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_power_manage.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import fixtures
+
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import base
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import api as cpu_api
+
+
+class PowerManagementTestsBase(base.ServersTestBase):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter']
+
+ ADMIN_API = True
+
+ def setUp(self):
+ super(PowerManagementTestsBase, self).setUp()
+
+ self.ctxt = nova_context.get_admin_context()
+
+ # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect
+ # this
+ host_manager = self.scheduler.manager.host_manager
+ numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
+ host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
+ _p = mock.patch('nova.scheduler.filters'
+ '.numa_topology_filter.NUMATopologyFilter.host_passes',
+ side_effect=host_pass_mock)
+ self.mock_filter = _p.start()
+ self.addCleanup(_p.stop)
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.useFixture(nova_fixtures.PrivsepFixture())
+
+ # Defining the main flavor with 4 vCPUs, all pinned
+ self.extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_thread_policy': 'prefer',
+ }
+ self.pcpu_flavor_id = self._create_flavor(
+ vcpu=4, extra_spec=self.extra_spec)
+
+ def _assert_server_cpus_state(self, server, expected='online'):
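+ """Assert the state of the CPUs pinned to the instance and return
+ that pinned CPU set.
+ """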
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ if not inst.numa_topology:
+ self.fail('Instance should have a NUMA topology in order to know '
+ 'its physical CPUs')
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ self._assert_cpu_set_state(instance_pcpus, expected=expected)
+ return instance_pcpus
+
+ def _assert_cpu_set_state(self, cpu_set, expected='online'):
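+ """Assert that every core in the set is online/offline or uses the
+ expected governor.
+ """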
+ for i in cpu_set:
+ core = cpu_api.Core(i)
+ if expected == 'online':
+ self.assertTrue(core.online, f'{i} is not online')
+ elif expected == 'offline':
+ self.assertFalse(core.online, f'{i} is online')
+ elif expected == 'powersave':
+ self.assertEqual('powersave', core.governor)
+ elif expected == 'performance':
+ self.assertEqual('performance', core.governor)
+
+
+class PowerManagementTests(PowerManagementTestsBase):
+ """Test suite for a single host with 9 dedicated cores and 1 used for OS"""
+
+ def setUp(self):
+ super(PowerManagementTests, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
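+ # NOTE: with the default 'cpu_state' strategy the dedicated cores are
+ # offlined until an instance is pinned to them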
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # All cores are shut down at startup, let's check.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ def test_hardstop_compute_service_if_wrong_opt(self):
+ self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.assertRaises(exception.InvalidConfiguration,
+ self.start_compute, host_info=self.host_info,
+ hostname='compute2')
+
+ def test_create_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Let's verify that the pinned CPUs are now online
+ self._assert_server_cpus_state(server, expected='online')
+
+ # Verify that the unused CPUs are still offline
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ unused_cpus = cpu_dedicated_set - instance_pcpus
+ self._assert_cpu_set_state(unused_cpus, expected='offline')
+
+ def test_stop_start_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+
+ server = self._stop_server(server)
+ # Let's verify that the pinned CPUs are now stopped...
+ self._assert_server_cpus_state(server, expected='offline')
+
+ server = self._start_server(server)
+ # ...and now, they should be back.
+ self._assert_server_cpus_state(server, expected='online')
+
+ def test_resize(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ server_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+
+ new_flavor_id = self._create_flavor(
+ vcpu=5, extra_spec=self.extra_spec)
+ self._resize_server(server, new_flavor_id)
+ server2_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+ # Even if the resize is not confirmed yet, the original guest is now
+ # destroyed so the cores are now offline.
+ self._assert_cpu_set_state(server_pcpus, expected='offline')
+
+ # let's revert the resize
+ self._revert_resize(server)
+ # So now the original CPUs will be online again, while the cores
+ # used by the resized guest should be back offline.
+ self._assert_cpu_set_state(server_pcpus, expected='online')
+ self._assert_cpu_set_state(server2_pcpus, expected='offline')
+
+ def test_changing_strategy_fails(self):
+ # As a reminder, all cores have been shut down before.
+ # Now we want to change the strategy and then restart the service.
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ # See, this is not possible as we would have offline CPUs.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementTestsGovernor(PowerManagementTestsBase):
+ """Test suite for speific governor usage (same 10-core host)"""
+
+ def setUp(self):
+ super(PowerManagementTestsGovernor, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ def test_create(self):
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ # With the governor strategy, cores are still online but run with a
+ # powersave governor.
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='powersave')
+
+ # Now, start an instance
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # When pinned cores are used, their governor is switched to performance
+ self._assert_server_cpus_state(server, expected='performance')
+
+ def test_changing_strategy_fails(self):
+ # Arbitrarily set a core governor strategy to be performance
+ cpu_api.Core(1).set_high_governor()
+ # and then forget about it while changing the strategy.
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ # This time this wouldn't be acceptable, as some cores would keep a
+ # different governor while Nova would only online/offline them.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementMixedInstances(PowerManagementTestsBase):
+ """Test suite for a single host with 6 dedicated cores, 3 shared and one
+ OS-restricted.
+ """
+
+ def setUp(self):
+ super(PowerManagementMixedInstances, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining 6 CPUs to be dedicated, not all of them contiguous.
+ self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # Make sure only the 6 dedicated cores are offline now
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ # cores 4 and 8-9 should be online
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ def test_standard_server_works_and_passes(self):
+
+ std_flavor_id = self._create_flavor(vcpu=2)
+ self._create_server(flavor_id=std_flavor_id, expected_state='ACTIVE')
+
+ # Since this is an instance with floating vCPUs on the shared set, we
+ # can only look up the host CPUs and see they haven't changed state.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ # We can now try to boot an instance with pinned CPUs to test the mix
+ pinned_server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # We'll see that its CPUs are now online
+ self._assert_server_cpus_state(pinned_server, expected='online')
+ # but it doesn't change the shared set
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index d1cad0e376..cb524fe8b6 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -12,9 +12,11 @@
# under the License.
import fixtures
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.tests import fixtures as nova_fixtures
@@ -75,6 +77,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
'nova.privsep.libvirt.get_pmem_namespaces',
return_value=self.fake_pmem_namespaces))
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128,
@@ -99,7 +102,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
- compute = self._start_compute(host=hostname)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ compute = self._start_compute(host=hostname)
# Ensure populating the existing pmems correctly.
vpmems = compute.driver._vpmems_by_name
diff --git a/nova/tests/functional/notification_sample_tests/test_compute_task.py b/nova/tests/functional/notification_sample_tests/test_compute_task.py
index 3de1c7d4e1..05d2d32fde 100644
--- a/nova/tests/functional/notification_sample_tests/test_compute_task.py
+++ b/nova/tests/functional/notification_sample_tests/test_compute_task.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova import objects
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
@@ -53,6 +56,10 @@ class TestComputeTaskNotificationSample(
},
actual=self.notifier.versioned_notifications[1])
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index f671a8abca..5a52c2dad6 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -46,18 +46,18 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.compute2 = self.start_service('compute', host='host2')
actions = [
- self._test_live_migration_rollback,
- self._test_live_migration_abort,
- self._test_live_migration_success,
- self._test_evacuate_server,
- self._test_live_migration_force_complete
+ (self._test_live_migration_rollback, 'ACTIVE'),
+ (self._test_live_migration_abort, 'ACTIVE'),
+ (self._test_live_migration_success, 'ACTIVE'),
+ (self._test_evacuate_server, 'SHUTOFF'),
+ (self._test_live_migration_force_complete, 'ACTIVE'),
]
- for action in actions:
+ for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
- self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@@ -275,6 +275,12 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
+ # The evacuate action earlier in this scenario stopped the
+ # server, so start it again before live migrating.
+ self._start_server(server)
+ self._wait_for_state_change(server, 'ACTIVE')
+ self.notifier.reset()
+
post = {
'os-migrateLive': {
'host': 'host2',
diff --git a/nova/tests/functional/regressions/test_bug_1595962.py b/nova/tests/functional/regressions/test_bug_1595962.py
index 94421a81f9..9232eea335 100644
--- a/nova/tests/functional/regressions/test_bug_1595962.py
+++ b/nova/tests/functional/regressions/test_bug_1595962.py
@@ -47,6 +47,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.admin_api = api_fixture.admin_api
self.api = api_fixture.api
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 6180dbfbaa..b20e1530cc 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -59,7 +59,8 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Now try to evacuate the server back to the original source compute.
server = self._evacuate_server(
server, {'onSharedStorage': 'False'},
- expected_host=self.compute.host, expected_migration_status='done')
+ expected_host=self.compute.host, expected_migration_status='done',
+ expected_state='ACTIVE')
# Assert the RequestSpec.ignore_hosts field is not populated.
reqspec = objects.RequestSpec.get_by_instance_uuid(
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 9a6a79d7a2..8088ccfe06 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -13,9 +13,11 @@
# limitations under the License.
import time
+from unittest import mock
from oslo_log import log as logging
+from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -81,6 +83,10 @@ class FailedEvacuateStateTests(test.TestCase,
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(created_server, 'ACTIVE')
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_evacuate_no_valid_host(self):
# Boot a server
server = self._boot_a_server()
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index aa86770584..59bbed4f46 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -95,7 +95,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Evacuate the instance from the source_host
server = self._evacuate_server(
- server, expected_migration_status='done')
+ server, expected_migration_status='done',
+ expected_state='ACTIVE')
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index 5e69905f5f..af134070cd 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -66,4 +66,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# higher than host3.
self._evacuate_server(
server, {'onSharedStorage': 'False'}, expected_host='host3',
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index dc74791e0e..3cfece8d36 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -216,7 +216,7 @@ class TestEvacuateResourceTrackerRace(
self._run_periodics()
self._wait_for_server_parameter(
- server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
+ server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'SHUTOFF'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1922053.py b/nova/tests/functional/regressions/test_bug_1922053.py
index 612be27b2b..70bb3d4cab 100644
--- a/nova/tests/functional/regressions/test_bug_1922053.py
+++ b/nova/tests/functional/regressions/test_bug_1922053.py
@@ -1,3 +1,4 @@
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -27,6 +28,7 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
ADMIN_API = True
microversion = 'latest'
+ expected_state = 'SHUTOFF'
def _create_test_server(self, compute_host):
return self._create_server(host=compute_host, networks='none')
@@ -59,7 +61,8 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
server = self._evacuate_server(
server,
expected_host='compute2',
- expected_migration_status='done'
+ expected_migration_status='done',
+ expected_state=self.expected_state
)
# Assert that the request to force up the host is rejected
@@ -97,6 +100,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""
microversion = '2.52'
+ expected_state = 'ACTIVE'
def _create_test_server(self, compute_host):
return self._create_server(az='nova:compute', networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1995153.py b/nova/tests/functional/regressions/test_bug_1995153.py
new file mode 100644
index 0000000000..f4e61d06df
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1995153.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2023 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+from unittest import mock
+
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from nova.objects import fields
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class Bug1995153RegressionTest(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+
+ ALIAS_NAME = 'a1'
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ }
+ )]
+ # we set the numa_affinity policy to required so that strict affinity
+ # between pci devices and the guest cpu and memory is enforced.
+ PCI_ALIAS = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': ALIAS_NAME,
+ 'device_type': fields.PciDeviceType.STANDARD,
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ )]
+
+ def setUp(self):
+ super(Bug1995153RegressionTest, self).setUp()
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
+ host_manager = self.scheduler.manager.host_manager
+ pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
+ host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
+ self.mock_filter = self.useFixture(fixtures.MockPatch(
+ 'nova.scheduler.filters.pci_passthrough_filter'
+ '.PciPassthroughFilter.host_passes',
+ side_effect=host_pass_mock)).mock
+
+ def test_socket_policy_bug_1995153(self):
+ """Previously, the numa_usage_from_instance_numa() method in
+ hardware.py saved the host NUMATopology object with NUMACells that have
+ no `socket` set. This was an omission in the original implementation of
+ the `socket` PCI NUMA affinity policy. The consequence was that any
+ code path that called into numa_usage_from_instance_numa() would
+ clobber the host NUMA topology in the database with a socket-less
+ version. Booting an instance with a NUMA topology would do that, for
+ example. If then a second instance was booted with the `socket` PCI
+ NUMA affinity policy, it would read the socket-less host NUMATopology
+ from the database, and error out with a NotImplementedError. This was
+ bug 1995153. Demonstrate that this is fixed.
+ """
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
+ kB_mem=(16 * units.Gi) // units.Ki)
+ self.flags(cpu_dedicated_set='0-3', group='compute')
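+ # dedicate host CPUs so the flavors below can request
+ # hw:cpu_policy=dedicated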
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
+
+ self.start_compute(host_info=host_info, pci_info=pci_info)
+
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
+ 'hw:pci_numa_affinity_policy': 'socket'
+ }
+ # Boot a first instance with a guest NUMA topology to run the
+ # numa_usage_from_instance_numa() and update the host NUMATopology in
+ # the database.
+ self._create_server(
+ flavor_id=self._create_flavor(
+ extra_spec={'hw:cpu_policy': 'dedicated'}))
+
+ # Boot an instance with the `socket` PCI NUMA affinity policy and
+ # assert that it boots correctly now.
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+ self.assertTrue(self.mock_filter.called)
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 7cbe8bdb67..01e3547f7e 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -444,7 +444,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
evacuated_server = self._evacuate_server(
servers[1], {'onSharedStorage': 'False'},
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -621,7 +622,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
compute3 = self.start_service('compute', host='host3')
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -800,7 +802,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@@ -870,6 +873,54 @@ class ServerGroupTestV264(ServerGroupTestV215):
self.assertEqual(2, hosts.count(host))
+class ServerGroupTestV295(ServerGroupTestV264):
+ microversion = '2.95'
+
+ def _evacuate_with_soft_anti_affinity_policies(self, group):
+ created_group = self.api.post_server_groups(group)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # Note(gibi): need to get the server again as the state of the instance
+ # goes to ACTIVE first then the host of the instance changes to the
+ # new host later
+ evacuated_server = self.admin_api.get_server(evacuated_server['id'])
+
+ return [evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host']]
+
+ def test_evacuate_with_anti_affinity(self):
+ created_group = self.api.post_server_groups(self.anti_affinity)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ # Start additional host to test evacuation
+ compute3 = self.start_service('compute', host='host3')
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # check that the server is evacuated
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # check that policy is kept
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host'])
+
+ compute3.kill()
+
+
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
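+ """Create a Glance image with the stable rescue image properties
+ (hw_rescue_device/hw_rescue_bus); extra properties can be passed
+ via metadata.
+ """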
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index d1ab84aa7b..5887c99081 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -2260,7 +2260,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
}
server = self._evacuate_server(
- server, extra_post_args=post, expected_host=dest_hostname)
+ server, extra_post_args=post, expected_host=dest_hostname,
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2437,7 +2438,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
# stay ACTIVE and task_state will be set to None.
server = self._evacuate_server(
server, expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -5324,7 +5326,8 @@ class ServerMovingTestsWithNestedResourceRequests(
server = self._evacuate_server(
server, extra_post_args=post, expected_migration_status='error',
- expected_host=source_hostname)
+ expected_host=source_hostname,
+ expected_state='ACTIVE')
self.assertIn('Unable to move instance %s to host host2. The instance '
'has complex allocations on the source host so move '
@@ -5530,7 +5533,8 @@ class ServerMovingTestsFromFlatToNested(
self._evacuate_server(
server, extra_post_args=post, expected_host='host1',
- expected_migration_status='error')
+ expected_migration_status='error',
+ expected_state='ACTIVE')
# We expect that the evacuation will fail as force evacuate tries to
# blindly copy the source allocation to the destination but on the
@@ -6522,3 +6526,41 @@ class PortAndFlavorAccelsServerCreateTest(AcceleratorServerBase):
binding_profile = neutronapi.get_binding_profile(updated_port)
self.assertNotIn('arq_uuid', binding_profile)
self.assertNotIn('pci_slot', binding_profile)
+
+
+class PortBindingShelvedServerTest(integrated_helpers._IntegratedTestBase):
+ """Tests for servers with ports."""
+
+ compute_driver = 'fake.SmallFakeDriver'
+
+ def setUp(self):
+ super(PortBindingShelvedServerTest, self).setUp()
+ self.flavor_id = self._create_flavor(
+ disk=10, ephemeral=20, swap=5 * 1024)
+
+ def test_shelve_offload_with_port(self):
+ # Do not wait before offloading
+ self.flags(shelved_offload_time=0)
+
+ server = self._create_server(
+ flavor_id=self.flavor_id,
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+ # Assert that the port is actually associated to the instance
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertEqual(port['binding:host_id'], 'compute')
+ self.assertEqual(port['binding:status'], 'ACTIVE')
+
+ # Do shelve
+ server = self._shelve_server(server, 'SHELVED_OFFLOADED')
+
+ # Retrieve the updated port
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+ # Assert that the port is still associated to the instance
+ # but the binding is not on the compute anymore
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertIsNone(port['binding:host_id'])
+ self.assertNotIn('binding:status', port)
diff --git a/nova/tests/functional/test_servers_resource_request.py b/nova/tests/functional/test_servers_resource_request.py
index e31ff42f14..9c91af7218 100644
--- a/nova/tests/functional/test_servers_resource_request.py
+++ b/nova/tests/functional/test_servers_resource_request.py
@@ -1068,7 +1068,7 @@ class PortResourceRequestBasedSchedulingTest(
def test_interface_attach_sriov_with_qos_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1115,7 +1115,7 @@ class PortResourceRequestBasedSchedulingTest(
):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1923,7 +1923,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_migrate_server_with_qos_port_pci_update_fail_not_reschedule(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1943,7 +1943,7 @@ class ServerMoveWithPortResourceRequestTest(
non_qos_port, qos_port, qos_sriov_port)
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name which will
+ # update_pci_request_with_placement_allocations which will
# intentionally not trigger a re-schedule even if there is host3 as an
# alternate.
self.api.post_server_action(server['id'], {'migrate': None})
@@ -2162,7 +2162,8 @@ class ServerMoveWithPortResourceRequestTest(
# simply fail and the server remains on the source host
server = self._evacuate_server(
server, expected_host='host1', expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state="ACTIVE")
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
@@ -2186,7 +2187,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_evacuate_with_qos_port_pci_update_fail(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is evacuated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2207,7 +2208,7 @@ class ServerMoveWithPortResourceRequestTest(
self.compute1_service_id, {'forced_down': 'true'})
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name
+ # update_pci_request_with_placement_allocations
server = self._evacuate_server(
server, expected_host='host1', expected_state='ERROR',
expected_task_state=None, expected_migration_status='failed')
@@ -2363,7 +2364,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_live_migrate_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is live migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2504,7 +2505,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_unshelve_offloaded_server_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is unshelved to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2537,7 +2538,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], {'unshelve': None})
# Unshelve fails on host2 due to
- # update_pci_request_spec_with_allocated_interface_name fails so the
+ # update_pci_request_with_placement_allocations fails so the
# instance goes back to shelve offloaded state
self.notifier.wait_for_versioned_notifications(
'instance.unshelve.start')
@@ -2979,6 +2980,7 @@ class ExtendedResourceRequestOldCompute(
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
+ self.api.microversion = '2.72'
@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
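+ # simulated content of the local compute_id file on disk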
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+ # Simulate the get_local_node_uuid behavior of calling write once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Baseline test case to show that starting and re-starting without
+ # hitting any error case works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index fb7f7662d8..bd88bb8d6e 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -416,3 +416,32 @@ class EvacuateTestV268(EvacuateTestV229):
def test_forced_evacuate_with_no_host_provided(self):
# not applicable for v2.68, which removed the 'force' parameter
pass
+
+
+class EvacuateTestV295(EvacuateTestV268):
+ def setUp(self):
+ super(EvacuateTestV268, self).setUp()
+ self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
+ version='2.95')
+ self.req = fakes.HTTPRequest.blank('', version='2.95')
+ self.mock_get_min_ver = self.useFixture(fixtures.MockPatch(
+ 'nova.objects.service.get_minimum_version_all_cells',
+ return_value=62)).mock
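+ # the tests below lower this minimum service version to exercise the
+ # error path of the 2.95 evacuate API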
+
+ def test_evacuate_version_error(self):
+ self.mock_get_min_ver.return_value = 61
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._get_evacuate_response,
+ {'host': 'my-host', 'adminPass': 'foo'})
+
+ def test_evacuate_unsupported_rpc(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.UnsupportedRPCVersion(
+ api="fakeapi",
+ required="x.xx")
+
+ self.stub_out('nova.compute.api.API.evacuate', fake_evacuate)
+ self._check_evacuate_failure(webob.exc.HTTPConflict,
+ {'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 0581a47c84..ea9ca2f632 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index bd09307567..961f4a02c9 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -104,6 +104,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index 636682a6b7..9d99c3ae6d 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -87,7 +87,8 @@ class ServerGroupTestV21(test.NoDBTestCase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
- self.req = fakes.HTTPRequest.blank('')
+ self.member_req = fakes.HTTPRequest.member_req('')
+ self.reader_req = fakes.HTTPRequest.reader_req('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -114,20 +115,20 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
- req = fakes.HTTPRequest.blank('', version='2.63')
+ req = fakes.HTTPRequest.member_req('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
@@ -162,7 +163,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
- self.controller.create(self.req, body={'server_group': sgroup})
+ self.controller.create(self.member_req, body={'server_group': sgroup})
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
@@ -289,7 +290,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ reader_req = fakes.HTTPRequest.reader_req(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
@@ -298,7 +299,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertEqual(all, res_dict)
# test as non-admin
- res_dict = self.controller.index(req)
+ res_dict = self.controller.index(reader_req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@@ -347,25 +348,27 @@ class ServerGroupTestV21(test.NoDBTestCase):
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ req = fakes.HTTPRequest.reader_req(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, self.req, uuidsentinel.group)
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ self.controller.show, self.reader_req, uuidsentinel.group)
def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
+ ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID,
+ roles=['member', 'reader'])
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
@@ -379,7 +382,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
@@ -393,7 +396,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
- self.controller.show(self.req, ig_uuid)
+ self.controller.show(self.reader_req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
@@ -406,7 +409,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
@@ -414,99 +417,99 @@ class ServerGroupTestV21(test.NoDBTestCase):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=None)
+ self.controller.create, self.member_req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=body)
+ self.controller.create, self.member_req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
@@ -528,7 +531,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.index(self.admin_req)
# test as non-admin
- self.controller.index(self.req)
+ self.controller.index(self.reader_req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
@@ -598,7 +601,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
- resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
+ resp = self.controller.delete(self.member_req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
@@ -611,7 +614,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- self.req, 'invalid')
+ self.member_req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
@@ -622,7 +625,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
- self.controller.delete(self.req, ig_uuid)
+ self.controller.delete(self.member_req, ig_uuid)
class ServerGroupTestV213(ServerGroupTestV21):
@@ -649,7 +652,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
@@ -674,7 +677,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@@ -690,7 +693,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
@@ -698,7 +701,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertIn("Only anti-affinity policy supports rules", str(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# Negative tests: the key is unknown, or the value is not a
# positive integer
invalid_rules = [{'unknown_key': '3'},
@@ -718,7 +721,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
@@ -734,7 +737,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
@@ -742,14 +745,14 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
@@ -771,7 +774,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_additional_params(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
@@ -786,7 +789,7 @@ class ServerGroupTestV275(ServerGroupTestV264):
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
- req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
- version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('/os-server-groups?dummy=False',
+ version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 8cf90ddebe..9ac970f787 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -240,6 +240,9 @@ class HTTPRequest(os_wsgi.Request):
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
+ roles = kwargs.pop('roles', [])
+ if use_admin_context:
+ roles.append('admin')
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
@@ -247,10 +250,19 @@ class HTTPRequest(os_wsgi.Request):
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
- is_admin=use_admin_context)
+ is_admin=use_admin_context,
+ roles=roles)
out.api_version_request = api_version.APIVersionRequest(version)
return out
+ @classmethod
+ def member_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['member', 'reader'], **kwargs)
+
+ @classmethod
+ def reader_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['reader'], **kwargs)
+
class HTTPRequestV21(HTTPRequest):
pass
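# Editor's note: a minimal, self-contained sketch (not Nova code) of the
# role-aware request-factory pattern the fakes.py hunk above adds to
# HTTPRequest: blank() gains a 'roles' kwarg copied onto the fake request
# context, and member_req()/reader_req() are thin wrappers that pre-populate
# the default project roles. Class and attribute names here are illustrative
# assumptions, not the real fixtures.

class FakeContext:
    def __init__(self, user_id, project_id, is_admin=False, roles=None):
        self.user_id = user_id
        self.project_id = project_id
        self.is_admin = is_admin
        self.roles = roles or []


class FakeRequest:
    def __init__(self, path, context):
        self.path = path
        self.environ = {'nova.context': context}

    @classmethod
    def blank(cls, path, use_admin_context=False, roles=None,
              project_id='fake-project'):
        roles = list(roles or [])
        if use_admin_context:
            roles.append('admin')
        ctx = FakeContext('fake_user', project_id,
                          is_admin=use_admin_context, roles=roles)
        return cls(path, ctx)

    @classmethod
    def member_req(cls, path, **kwargs):
        # project member: may create/delete server groups
        return cls.blank(path, roles=['member', 'reader'], **kwargs)

    @classmethod
    def reader_req(cls, path, **kwargs):
        # project reader: may only list/show server groups
        return cls.blank(path, roles=['reader'], **kwargs)


if __name__ == '__main__':
    req = FakeRequest.member_req('/os-server-groups')
    assert req.environ['nova.context'].roles == ['member', 'reader']
    assert FakeRequest.reader_req('/x').environ['nova.context'].roles == [
        'reader']
    assert 'admin' in FakeRequest.blank(
        '/x', use_admin_context=True).environ['nova.context'].roles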
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index df51665959..29dd5610f6 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -128,20 +128,21 @@ class TestPolicyCheck(test.NoDBTestCase):
self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
- self._check_filter_rules()
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context)
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
@@ -150,11 +151,13 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- self._check_filter_rules(target=instance)
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context, target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
- project_id='fake-project')
+ project_id='fake-project',
+ roles=['reader'])
instance = fake_instance.fake_instance_obj(db_context)
rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index f17a767b99..9d6e9ba4bd 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -4081,7 +4081,8 @@ class _ComputeAPIUnitTestMixIn(object):
injected_files=[], bdms=bdms,
preserve_ephemeral=False, host=None,
request_spec=fake_spec,
- reimage_boot_volume=True)
+ reimage_boot_volume=True,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
_checks_for_create_and_rebuild.assert_called_once_with(
@@ -4096,7 +4097,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance,
uuids.image_ref,
admin_pass,
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
@mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@@ -4155,7 +4157,8 @@ class _ComputeAPIUnitTestMixIn(object):
instance,
uuids.image_ref,
admin_pass,
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@@ -4205,7 +4208,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4278,7 +4282,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4346,7 +4351,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4405,7 +4411,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4469,7 +4476,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec, reimage_boot_volume=False)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -5794,7 +5802,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5846,6 +5857,7 @@ class _ComputeAPIUnitTestMixIn(object):
# Assert that the instance task state is set by the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5854,7 +5866,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5862,6 +5875,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5899,6 +5918,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+ # Assert that the instance task state is set by the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+ # Assert that any attempt to rescue a bfv instance with a rescue
+ # image that does not set the required hw_rescue_device and
+ # hw_rescue_bus properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
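# Editor's note: the two new tests above imply (an inference from the
# assertions, not Nova's actual implementation) that the compute API now
# loads the rescue image's metadata and only allows boot-from-volume rescue
# when the image carries the stable-rescue properties, falling back to
# InstanceNotRescuable otherwise. A rough, hypothetical sketch of that guard;
# whether one or both properties are required is also an assumption.

class InstanceNotRescuable(Exception):
    pass


def check_bfv_rescue_allowed(rescue_image_properties, is_volume_backed,
                             allow_bfv_rescue, node_traits):
    """Raise InstanceNotRescuable if a BFV rescue must be refused.

    rescue_image_properties: dict of image properties of the rescue image.
    node_traits: traits reported by the instance's compute node provider.
    """
    if not is_volume_backed:
        return
    stable_rescue = {'hw_rescue_device', 'hw_rescue_bus'} & set(
        rescue_image_properties)
    if not (allow_bfv_rescue and stable_rescue):
        raise InstanceNotRescuable(
            'Boot-from-volume rescue requires a rescue image with '
            'hw_rescue_device/hw_rescue_bus set')
    if 'COMPUTE_RESCUE_BFV' not in node_traits:
        raise InstanceNotRescuable(
            'Host does not report the COMPUTE_RESCUE_BFV trait')


if __name__ == '__main__':
    ok_props = {'hw_rescue_device': 'disk', 'hw_rescue_bus': 'scsi'}
    check_bfv_rescue_allowed(ok_props, True, True, {'COMPUTE_RESCUE_BFV'})
    try:
        check_bfv_rescue_allowed({}, True, True, {'COMPUTE_RESCUE_BFV'})
    except InstanceNotRescuable:
        pass
    else:
        raise AssertionError('expected InstanceNotRescuable')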
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 9ef3999441..dcdef56fbe 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -169,7 +169,8 @@ class ClaimTestCase(test.NoDBTestCase):
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
@@ -181,7 +182,8 @@ class ClaimTestCase(test.NoDBTestCase):
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 314c29f583..36bcd368dc 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -2731,7 +2731,7 @@ class ComputeTestCase(BaseTestCase,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2762,7 +2762,7 @@ class ComputeTestCase(BaseTestCase,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2815,7 +2815,7 @@ class ComputeTestCase(BaseTestCase,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
on_shared_storage=False, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2834,7 +2834,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[], reimage_boot_volume=False)
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2855,7 +2856,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[], reimage_boot_volume=False)
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2889,7 +2890,7 @@ class ComputeTestCase(BaseTestCase,
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -4617,7 +4618,8 @@ class ComputeTestCase(BaseTestCase,
'request_spec': None,
'on_shared_storage': False,
'accel_uuids': (),
- 'reimage_boot_volume': False}),
+ 'reimage_boot_volume': False,
+ 'target_state': None}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5136,7 +5138,7 @@ class ComputeTestCase(BaseTestCase,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
inst_ref.refresh()
@@ -5670,6 +5672,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([1, 2]),
siblings=[set([1]), set([2])],
mempages=[objects.NUMAPagesTopology(
@@ -5685,6 +5688,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
memory_usage=0,
cpu_usage=0,
+ socket=0,
siblings=[set([3]), set([4])],
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=256, used=0)])
@@ -10843,7 +10847,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
mock_update_pci
@@ -10913,7 +10917,7 @@ class ComputeAPITestCase(BaseTestCase):
new=mock.NonCallableMock()),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10958,7 +10962,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -11025,7 +11029,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
'remove_resources_from_instance_allocation'),
@@ -11961,7 +11965,7 @@ class ComputeAPITestCase(BaseTestCase):
force=False)
@mock.patch('nova.compute.utils.notify_about_instance_action')
- def _test_evacuate(self, mock_notify, force=None):
+ def _test_evacuate(self, mock_notify, force=None, target_state=None):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
@@ -11998,7 +12002,8 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host',
on_shared_storage=True,
admin_password=None,
- force=force)
+ force=force,
+ target_state=target_state)
if force is False:
host = None
else:
@@ -12015,7 +12020,8 @@ class ComputeAPITestCase(BaseTestCase):
recreate=True,
on_shared_storage=True,
request_spec=fake_spec,
- host=host)
+ host=host,
+ target_state=target_state)
do_test()
instance.refresh()
@@ -12047,6 +12053,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_evacuate_with_forced_host(self):
self._test_evacuate(force=True)
+ def test_evacuate_with_target_state(self):
+ self._test_evacuate(target_state="stopped")
+
@mock.patch('nova.servicegroup.api.API.service_is_up',
return_value=False)
def test_fail_evacuate_with_non_existing_destination(self, _service_is_up):
@@ -13506,7 +13515,8 @@ class EvacuateHostTestCase(BaseTestCase):
super(EvacuateHostTestCase, self).tearDown()
def _rebuild(self, on_shared_storage=True, migration=None,
- send_node=False, vm_states_is_stopped=False):
+ send_node=False, vm_states_is_stopped=False,
+ expect_error=False):
network_api = self.compute.network_api
ctxt = context.get_admin_context()
@@ -13520,7 +13530,7 @@ class EvacuateHostTestCase(BaseTestCase):
return_value=mock.sentinel.mapping)
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
@mock.patch.object(network_api, 'setup_networks_on_host')
@@ -13540,7 +13550,8 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[], reimage_boot_volume=False)
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13552,6 +13563,11 @@ class EvacuateHostTestCase(BaseTestCase):
action='power_off', phase='start'),
mock.call(ctxt, self.inst, self.inst.host,
action='power_off', phase='end')])
+ elif expect_error:
+ mock_notify_rebuild.assert_has_calls([
+ mock.call(ctxt, self.inst, self.compute.host,
+ phase='error', exception=mock.ANY, bdms=bdms)])
+ return
else:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13606,14 +13622,15 @@ class EvacuateHostTestCase(BaseTestCase):
mock.patch.object(self.compute, '_get_compute_info',
side_effect=fake_get_compute_info)
) as (mock_inst, mock_get):
- self._rebuild()
+ self.assertRaises(exception.InstanceFaultRollback,
+ self._rebuild, expect_error=True)
# Should be on destination host
instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertIsNone(instance['node'])
- self.assertTrue(mock_inst.called)
- self.assertTrue(mock_get.called)
+ self.assertEqual('fake_host_2', instance['host'])
+ self.assertEqual('fakenode2', instance['node'])
+ mock_inst.assert_not_called()
+ mock_get.assert_called_once_with(mock.ANY, self.compute.host)
def test_rebuild_on_host_node_passed(self):
patch_get_info = mock.patch.object(self.compute, '_get_compute_info')
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 1a4935f482..73c9d32197 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -57,6 +57,7 @@ from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
@@ -76,6 +77,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.virt import node as virt_node
from nova.volume import cinder
@@ -89,6 +91,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
# os-brick>=5.1 now uses external file system locks instead of internal
# locks so we need to set up locking
REQUIRES_LOCKING = True
+ STUB_COMPUTE_ID = False
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
@@ -906,6 +909,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
+ @mock.patch.object(manager.ComputeManager,
+ '_ensure_existing_node_identity')
@mock.patch.object(manager.ComputeManager, '_get_nodes')
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@@ -924,17 +929,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm, mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_init_host,
- mock_error_interrupted, mock_get_nodes):
+ mock_error_interrupted, mock_get_nodes,
+ mock_existing_node):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
+ mock_existing_node.assert_not_called()
mock_validate_pinning.assert_called_once_with(inst_list)
mock_validate_vtpm.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
@@ -977,8 +984,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"""
mock_get_nodes.return_value = {
uuids.cn_uuid1: objects.ComputeNode(
- uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
- self.compute.init_host()
+ uuid=uuids.cn_uuid1, hypervisor_hostname='node1',
+ host=self.compute.host)}
+ self.compute.init_host(None)
mock_error_interrupted.assert_called_once_with(
test.MatchType(nova.context.RequestContext), set(),
@@ -988,16 +996,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
- def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
+ def test_cleanup_host(self, mock_cnlist_get, mock_miglist_get,
+ mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
+ mock_cnlist_get.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
+ self.compute.init_host(None)
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
@@ -1086,7 +1097,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'remove_provider_tree_from_instance_allocation')
) as (mock_get_net, mock_remove_allocation):
- self.compute.init_host()
+ self.compute.init_host(None)
mock_remove_allocation.assert_called_once_with(
self.context, deleted_instance.uuid, uuids.our_node_uuid)
@@ -1139,11 +1150,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuids.evac_instance: evacuating_instance
}
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
mock_init_instance.assert_called_once_with(
self.context, active_instance)
@@ -1151,23 +1162,49 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {active_instance.uuid, evacuating_instance.uuid},
mock_get_nodes.return_value.keys())
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
- def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn1 = objects.ComputeNode(uuid=uuids.cn1)
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [cn1, cn2]
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
+ # NOTE(danms): The fake driver, by default, uses
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
+ # return here.
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
+ mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn1: cn1, uuids.cn2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_by_uuid.assert_called_once_with(self.context,
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # Possible hostname (as reported by the virt driver) rename,
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
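# Editor's note: taken together, the _get_nodes tests above suggest (inferred
# from the test expectations rather than copied from the manager) a lookup now
# keyed on node UUIDs: the virt driver reports {node_uuid: hypervisor_hostname},
# the database is queried by those UUIDs, missing records only produce a
# warning, and a hypervisor-hostname mismatch aborts startup. A hypothetical
# sketch:

import logging

LOG = logging.getLogger(__name__)


class InvalidConfiguration(Exception):
    pass


def get_nodes(driver_nodes_by_uuid, db_nodes, host):
    """driver_nodes_by_uuid: {uuid: hypervisor_hostname} from the virt driver.

    db_nodes: ComputeNode-like records (with .uuid/.hypervisor_hostname)
    already fetched by those UUIDs for this host.
    """
    if not db_nodes:
        LOG.warning(
            'Compute nodes %s for host %s were not found in the database. '
            'If this is the first time this service is starting on this '
            'host, then you can ignore this warning.',
            list(driver_nodes_by_uuid), host)
        return {}
    nodes = {}
    for node in db_nodes:
        reported = driver_nodes_by_uuid.get(node.uuid)
        if reported is not None and reported != node.hypervisor_hostname:
            # The hypervisor hostname appears to have changed underneath us;
            # refuse to start rather than create duplicate node records.
            raise InvalidConfiguration(
                'Node %s reports hostname %s but the database has %s' % (
                    node.uuid, reported, node.hypervisor_hostname))
        nodes[node.uuid] = node
    return nodes


if __name__ == '__main__':
    from collections import namedtuple
    CN = namedtuple('CN', 'uuid hypervisor_hostname')
    assert get_nodes({'u1': 'node1'}, [CN('u1', 'node1')], 'host')['u1']
    assert get_nodes({'u1': 'node1'}, [], 'host') == {}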
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
@@ -1189,37 +1226,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
- self, mock_driver_get_nodes, mock_get_by_host_and_node,
+ self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [
- exception.ComputeHostNotFound(host='fake-node1'), cn2]
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
+ mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn2: cn2}, nodes)
+ self.assertEqual({}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_all_by_uuids.assert_called_once_with(self.context,
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
- "Compute node %s not found in the database. If this is the first "
- "time this service is starting on this host, then you can ignore "
- "this warning.", 'fake-node1')
+ "Compute nodes %s for host %s were not found in the database. "
+ "If this is the first time this service is starting on this host, "
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(self,
@@ -1230,13 +1265,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_pinning.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(self,
@@ -1247,7 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -2523,10 +2560,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertFalse(mock_get_info.called)
self.assertFalse(mock_sync_power_state.called)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_not_found_driver(
- self, mock_sync_power_state):
+ self, mock_sync_power_state, mock_claim):
error = exception.InstanceNotFound(instance_id=1)
with mock.patch.object(self.compute.driver,
'get_info', side_effect=error) as mock_get_info:
@@ -5145,7 +5183,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
group='pci'
)
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
@@ -5335,7 +5373,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [], False)
+ recreate, False, False, None, scheduled_node, {}, None, [], False,
+ None)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5447,7 +5486,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
limits={}, request_spec=request_spec, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False,
+ target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5487,7 +5527,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
request_spec=request_spec, accel_uuids=[],
- reimage_boot_volume=False)
+ reimage_boot_volume=False, target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5513,7 +5553,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [], False)
+ False, False, migration, None, {}, None, [], False,
+ None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5535,7 +5576,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [], False)
+ None, [], False, None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5618,7 +5659,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
preserve_ephemeral, {}, {},
self.allocations,
mock.sentinel.mapping, [],
- False)
+ False, None)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5877,7 +5918,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[], reimage_boot_volume=False)
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -6321,6 +6362,171 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual({'one-image': 'cached',
'two-image': 'existing'}, r)
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
+ # Make sure an up-to-date service bypasses the persistence
+ service_ref = service_obj.Service()
+ self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
+ # Make sure an old service for ironic does not write a local node uuid
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_preprovisioned(self,
+ mock_read_node,
+ mock_write_node):
+ # Make sure an old service does not write a uuid if one is present
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = str(uuids.SOME_UUID)
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_no_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find no nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = []
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_multi_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find multiple nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [1, 2]
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_writes_node_uuid(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, there is no pre-provisioned local
+ # compute node uuid, and we find exactly one compute node in the
+ # database for our host, we persist that.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [
+ objects.ComputeNode(uuid=str(uuids.compute)),
+ ]
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_called_once_with(str(uuids.compute))
+
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
+ def test_ensure_node_uuid_called_by_init_host(self):
+ # test_init_host() above ensures that we do not call
+ # _ensure_existing_node_identity() in the service_ref=None case.
+ # Since testing init_host() requires a billion mocks, this
+ # tests that we do call it when expected, but make it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_ensure_existing_node_identity') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host,
+ mock.sentinel.service_ref)
+ m.assert_called_once_with(mock.sentinel.service_ref)
+
+ def test_check_for_host_rename_ironic(self):
+ self.flags(compute_driver='ironic')
+ # Passing a node owned by another host makes sure we take the early
+ # exit because of our (ironic) virt driver
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.compute._check_for_host_rename(nodes)
+
+ def test_check_for_host_rename_renamed_only(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_renamed_one(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host),
+ uuids.node2: mock.MagicMock(uuid=uuids.node2,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_not_renamed(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host)}
+ with mock.patch.object(manager.LOG, 'debug') as mock_debug:
+ self.compute._check_for_host_rename(nodes)
+ mock_debug.assert_called_once_with(
+ 'Verified node %s matches my host %s',
+ uuids.node1, self.compute.host)
+
+ @mock.patch('nova.compute.manager.ComputeManager._get_nodes')
+ def test_check_for_host_rename_called_by_init_host(self, mock_nodes):
+ # Since testing init_host() requires a billion mocks, this
+ # tests that we do call it when expected, but make it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_check_for_host_rename') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host, None)
+ m.assert_called_once_with(mock_nodes.return_value)
+
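# Editor's note: the _ensure_existing_node_identity tests above outline
# (again, inferred from the assertions rather than copied from the manager)
# the upgrade path for the persistent compute-node UUID: a new enough service
# must already have the local identity file, ironic is exempt, and an old
# non-ironic service with exactly one ComputeNode record for its host writes
# that node's UUID locally before continuing. A hypothetical sketch of the
# decision tree; names and the version threshold are assumptions.

class InvalidConfiguration(Exception):
    pass


def ensure_existing_node_identity(service_version, node_identity_version,
                                  compute_driver, local_node_uuid,
                                  db_node_uuids_for_host, write_local_uuid):
    """local_node_uuid: UUID read from the local identity file, or None.

    db_node_uuids_for_host: UUIDs of ComputeNode records for this host.
    write_local_uuid: callable persisting a UUID to the identity file.
    """
    if compute_driver == 'ironic':
        # Ironic nodes are not tied to a single local identity file.
        return
    if service_version >= node_identity_version:
        # A new enough service must already have a persisted identity.
        if local_node_uuid is None:
            raise InvalidConfiguration('missing local compute_id file')
        return
    if local_node_uuid is not None:
        # Pre-provisioned identity; nothing to do on upgrade.
        return
    if len(db_node_uuids_for_host) != 1:
        # Zero or multiple nodes: we cannot guess which identity is ours.
        raise InvalidConfiguration(
            'expected exactly one compute node for this host, found %d'
            % len(db_node_uuids_for_host))
    write_local_uuid(db_node_uuids_for_host[0])


if __name__ == '__main__':
    written = []
    ensure_existing_node_identity(60, 61, 'libvirt', None, ['abc'],
                                  written.append)
    assert written == ['abc']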
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -6363,6 +6569,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute.rt = fake_rt
+ self.compute.driver._set_nodes([self.node])
+ self.compute.rt.compute_nodes = {self.node: objects.ComputeNode()}
self.allocations = {
uuids.provider1: {
@@ -6652,6 +6860,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.assert_called_once_with(
self.instance.uuid, only_resolved=True)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -6663,7 +6872,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_spawn_called_with_accel_info(self, mock_ins_usage,
mock_ins_create, mock_dev_tag, mock_certs, mock_req_group_map,
- mock_get_allocations, mock_ins_save, mock_spawn):
+ mock_get_allocations, mock_ins_save, mock_spawn, mock_claim):
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
@@ -6937,13 +7146,15 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.security_groups, self.block_device_mapping,
request_spec={}, host_lists=[fake_host_list])
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_rescheduled_exception_with_non_ascii_exception(self,
- mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown):
+ mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown,
+ mock_claim):
exc = exception.NovaException(u's\xe9quence')
mock_build.return_value = self.network_info
@@ -6959,7 +7170,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.accel_uuids)
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
mock_notify.assert_has_calls([
@@ -7465,6 +7675,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertEqual(10, mock_failed.call_count)
mock_succeeded.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@@ -7472,7 +7683,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def _test_instance_exception(self, exc, raised_exc,
mock_notify, mock_save, mock_spawn,
- mock_build, mock_shutdown):
+ mock_build, mock_shutdown, mock_claim):
"""This method test the instance related InstanceNotFound
and reschedule on exception errors. The test cases get from
arguments.
@@ -7495,7 +7706,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping')])
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
@@ -7606,11 +7816,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
'_shutdown_instance'),
mock.patch.object(self.compute,
'_validate_instance_group_policy'),
+ mock.patch.object(self.compute.rt, 'instance_claim'),
mock.patch('nova.compute.utils.notify_about_instance_create')
) as (spawn, save,
_build_networks_for_instance, _notify_about_instance_usage,
_shutdown_instance, _validate_instance_group_policy,
- mock_notify):
+ mock_claim, mock_notify):
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance, self.context,
@@ -7641,7 +7852,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
@@ -7703,11 +7913,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
request_spec={}, host_lists=[fake_host_list])
mock_nil.assert_called_once_with(self.instance)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_build_resources')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_build_resources_buildabort_reraise(self, mock_notify, mock_save,
- mock_build):
+ mock_build, mock_claim):
exc = exception.BuildAbortException(
instance_uuid=self.instance.uuid, reason='')
mock_build.side_effect = exc
@@ -7721,7 +7932,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.node, self.limits, self.filter_properties,
request_spec=[], accel_uuids=self.accel_uuids)
- mock_save.assert_called_once_with()
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
extra_usage_info={'image_name': self.image.get('name')}),
@@ -7927,6 +8137,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
@@ -8340,10 +8586,11 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
ctxt, instance, req_networks)
warning_mock.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.utils.notify_about_instance_create')
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_launched_at_in_create_end_notification(self,
- mock_instance_update, mock_notify_instance_create):
+ mock_instance_update, mock_notify_instance_create, mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
@@ -8383,6 +8630,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.flags(default_access_ip_network_name='test1')
instance = fake_instance.fake_db_instance()
+ @mock.patch.object(self.compute.rt, 'instance_claim')
@mock.patch.object(db, 'instance_update_and_get_original',
return_value=({}, instance))
@mock.patch.object(self.compute.driver, 'spawn')
@@ -8391,7 +8639,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
def _check_access_ip(mock_notify, mock_extra, mock_networks,
- mock_spawn, mock_db_update):
+ mock_spawn, mock_db_update, mock_claim):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
@@ -8412,8 +8660,10 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
_check_access_ip()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_instance_update')
- def test_create_error_on_instance_delete(self, mock_instance_update):
+ def test_create_error_on_instance_delete(self, mock_instance_update,
+ mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.error':
@@ -8427,7 +8677,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save',
- side_effect=[None, None, None, exc]),
+ side_effect=[None, None, exc]),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_spawn, mock_networks, mock_save, mock_notify):
@@ -8456,7 +8706,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(
self.compute, '_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save'),
- ) as (mock_spawn, mock_networks, mock_save):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_claim):
self.compute._build_and_run_instance(
self.context,
self.instance, self.image, self.injected_files,
@@ -8487,11 +8738,17 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
# resource request and therefore no matching request group exists in
# the request spec.
self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(),
objects.InstancePCIRequest(
+ request_id=uuids.req0,
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
requester_id=uuids.port1,
spec=[{'vendor_id': '1377', 'product_id': '0047'}]),
- objects.InstancePCIRequest(requester_id=uuids.port2),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ requester_id=uuids.port2,
+ ),
])
with test.nested(
mock.patch.object(self.compute.driver, 'spawn'),
@@ -8500,7 +8757,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.instance, 'save'),
mock.patch('nova.scheduler.client.report.'
'SchedulerReportClient._get_resource_provider'),
- ) as (mock_spawn, mock_networks, mock_save, mock_get_rp):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_get_rp, mock_claim):
mock_get_rp.return_value = {
'uuid': uuids.rp1,
'name': 'compute1:sriov-agent:ens3'
@@ -8536,8 +8794,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = None
@@ -8559,8 +8822,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = {
@@ -8584,8 +8852,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1, uuids.rp2])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
self.assertRaises(
exception.BuildAbortException,
@@ -9386,9 +9659,15 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual(driver_console.get_connection_info.return_value,
console)
+ @mock.patch('nova.utils.pass_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
- def _test_max_concurrent_live(self, mock_lm):
+ def _test_max_concurrent_live(self, mock_lm, mock_pass_context):
+        # pass_context wraps the function, which doesn't work with a mock,
+        # so we simply mock it as well
+ def _mock_pass_context(runner, func, *args, **kwargs):
+ return runner(func, *args, **kwargs)
+ mock_pass_context.side_effect = _mock_pass_context
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
@@ -11061,7 +11340,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -11095,7 +11374,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
diff --git a/nova/tests/unit/compute/test_pci_placement_translator.py b/nova/tests/unit/compute/test_pci_placement_translator.py
index ee6a0469ac..0592186e54 100644
--- a/nova/tests/unit/compute/test_pci_placement_translator.py
+++ b/nova/tests/unit/compute/test_pci_placement_translator.py
@@ -12,12 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import ddt
+from oslo_utils.fixture import uuidsentinel as uuids
from unittest import mock
from nova.compute import pci_placement_translator as ppt
+from nova.compute import provider_tree
from nova import exception
from nova.objects import fields
from nova.objects import pci_device
+from nova.pci import devspec
from nova import test
@@ -88,8 +91,8 @@ class TestTranslator(test.NoDBTestCase):
)
def test_trait_normalization(self, trait_names, expected_traits):
self.assertEqual(
- expected_traits | {"COMPUTE_MANAGED_PCI_DEVICE"},
- ppt._get_traits_for_dev({"traits": trait_names})
+ expected_traits,
+ ppt.get_traits(trait_names)
)
@ddt.unpack
@@ -110,7 +113,9 @@ class TestTranslator(test.NoDBTestCase):
def test_resource_class_normalization(self, pci_dev, rc_name, expected_rc):
self.assertEqual(
expected_rc,
- ppt._get_rc_for_dev(pci_dev, {"resource_class": rc_name})
+ ppt.get_resource_class(
+ rc_name, pci_dev.vendor_id, pci_dev.product_id
+ ),
)
def test_dependent_device_pf_then_vf(self):
@@ -118,12 +123,16 @@ class TestTranslator(test.NoDBTestCase):
"fake-node", instances_under_same_host_resize=[])
pf = pci_device.PciDevice(
address="0000:81:00.0",
- dev_type=fields.PciDeviceType.SRIOV_PF
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
)
vf = pci_device.PciDevice(
address="0000:81:00.1",
parent_addr=pf.address,
- dev_type=fields.PciDeviceType.SRIOV_VF
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
)
pv._add_dev(pf, {"resource_class": "foo"})
@@ -146,17 +155,23 @@ class TestTranslator(test.NoDBTestCase):
"fake-node", instances_under_same_host_resize=[])
pf = pci_device.PciDevice(
address="0000:81:00.0",
- dev_type=fields.PciDeviceType.SRIOV_PF
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
)
vf = pci_device.PciDevice(
address="0000:81:00.1",
parent_addr=pf.address,
- dev_type=fields.PciDeviceType.SRIOV_VF
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
)
vf2 = pci_device.PciDevice(
address="0000:81:00.2",
parent_addr=pf.address,
- dev_type=fields.PciDeviceType.SRIOV_VF
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
)
pv._add_dev(vf, {"resource_class": "foo"})
@@ -182,7 +197,10 @@ class TestTranslator(test.NoDBTestCase):
pci_device.PciDevice(
address="0000:81:00.%d" % f,
parent_addr="0000:71:00.0",
- dev_type=fields.PciDeviceType.SRIOV_VF)
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
for f in range(0, 4)
]
@@ -220,3 +238,54 @@ class TestTranslator(test.NoDBTestCase):
"CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO for 0000:81:00.0,0000:81:00.1.",
str(ex),
)
+
+ def test_translator_maps_pci_device_to_rp(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+
+ pv._add_dev(vf, {})
+ pv._add_dev(pf, {})
+ pv.update_provider_tree(pt)
+
+ self.assertEqual(
+ pt.data("fake-node_0000:71:00.0").uuid, vf.extra_info["rp_uuid"]
+ )
+ self.assertEqual(
+ pt.data("fake-node_0000:72:00.0").uuid, pf.extra_info["rp_uuid"]
+ )
+
+ def test_update_provider_tree_for_pci_update_pools(self):
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ )
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = [pf]
+ pci_tracker.dev_filter.specs = [devspec.PciDeviceSpec({})]
+
+ ppt.update_provider_tree_for_pci(pt, 'fake-node', pci_tracker, {}, [])
+
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices.\
+ assert_called_once_with()
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index b81d7365d2..919dcb8334 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -64,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
+ 'uuid': uuids.cn1,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
+ deleted=False,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -179,6 +181,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([1]), set([2])],
pinned_cpus=set()),
@@ -189,6 +192,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([3]), set([4])],
pinned_cpus=set())]),
@@ -586,7 +590,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
@@ -619,7 +623,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
@@ -643,8 +647,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'flavor',
'migration_context',
'resources'])
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
- _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
@@ -671,7 +674,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -730,7 +733,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
@@ -747,7 +750,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
@@ -772,7 +775,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
@@ -797,7 +800,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
@@ -823,7 +826,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -863,7 +866,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
@@ -889,7 +892,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -926,7 +929,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -952,7 +955,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -987,7 +990,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1013,7 +1016,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1056,7 +1059,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
@@ -1084,7 +1087,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1121,7 +1124,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1147,7 +1150,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1199,7 +1202,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1240,7 +1243,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1273,7 +1276,7 @@ class TestInitComputeNode(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
@@ -1296,14 +1299,14 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
- def fake_get_node(_ctx, host, node):
+ def fake_get_node(_ctx, uuid):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
@@ -1313,85 +1316,67 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.cn1)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
- pci_mock, get_by_hypervisor_mock):
+ pci_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
- def fake_get_all(_ctx, nodename):
- return [cn]
+ def fake_get_node(_ctx, uuid):
+ return cn
- get_mock.side_effect = exc.NotFound
- get_by_hypervisor_mock.side_effect = fake_get_all
+ get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
- self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock)
+ create_mock):
+ self._test_compute_node_created(update_mock, get_mock, create_mock)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@@ -1452,13 +1437,9 @@ class TestInitComputeNode(BaseTestCase):
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- if rebalances_nodes:
- get_by_hypervisor_mock.assert_called_once_with(
- mock.sentinel.ctx, _NODENAME)
- else:
- get_by_hypervisor_mock.assert_not_called()
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.compute_node_uuid)
+
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
@@ -1466,7 +1447,7 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
@@ -1489,14 +1470,14 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
- ctxt, _HOSTNAME, _NODENAME)] * 2)
+ ctxt, uuids.cn_uuid)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
@@ -1512,6 +1493,81 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'fake-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Host is the same, no _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_not_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node_move_host(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'old-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Our host changed, so we should have the updated value and have
+ # called _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
@@ -1580,6 +1636,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
+ @mock.patch('nova.objects.ComputeNode.save', new=mock.Mock())
@mock.patch(
'nova.pci.stats.PciDeviceStats.has_remote_managed_device_pools',
return_value=True)
@@ -1773,7 +1830,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
- self.assertEqual(1, mock_resource_change.call_count)
+ self.assertEqual(0, mock_resource_change.call_count)
@mock.patch(
'nova.compute.resource_tracker.ResourceTracker.'
@@ -2041,6 +2098,10 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_update_to_placement',
+ new=mock.Mock())
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
@@ -2172,14 +2233,19 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_nodes = {}
self.assertTrue(self.rt.disabled(_NODENAME))
- with mock.patch.object(self.instance, 'save'):
- claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
- _NODENAME, self.allocations, None)
+ # Reset all changes to the instance to make sure that we can detect
+ # any manipulation after the failure.
+ self.instance.obj_reset_changes(recursive=True)
- self.assertEqual(self.rt.host, self.instance.host)
- self.assertEqual(self.rt.host, self.instance.launched_on)
- self.assertEqual(_NODENAME, self.instance.node)
- self.assertIsInstance(claim, claims.NopClaim)
+ with mock.patch.object(self.instance, 'save') as mock_save:
+ self.assertRaises(exc.ComputeResourcesUnavailable,
+ self.rt.instance_claim,
+ mock.sentinel.ctx, self.instance,
+ _NODENAME, self.allocations, None)
+ mock_save.assert_not_called()
+
+ # Make sure the instance was not touched by the failed claim process
+ self.assertEqual(set(), self.instance.obj_what_changed())
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@@ -2401,7 +2467,10 @@ class TestInstanceClaim(BaseTestCase):
vendor_id='0001',
product_id='0002',
numa_node=0,
- tags={'dev_type': 'type-PCI'},
+ tags={
+ 'dev_type': 'type-PCI',
+ 'address': '0000:81:00.0'
+ },
count=0
)
]
@@ -2422,7 +2491,8 @@ class TestInstanceClaim(BaseTestCase):
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
- pci_stats_mock.assert_called_once_with([request])
+ pci_stats_mock.assert_called_once_with(
+ [request], provider_mapping=None)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -2626,7 +2696,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2730,7 +2800,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
return_value=_COMPUTE_NODE_FIXTURES[0])
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
return_value=[])
@@ -2902,7 +2972,7 @@ class TestResize(BaseTestCase):
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -3072,7 +3142,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -3204,7 +3274,7 @@ class TestRebuild(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 55d0fc53e8..6f78678a92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -836,7 +836,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
limits=None, request_spec=None, accel_uuids=[],
- reimage_boot_volume=False, version='6.1')
+ reimage_boot_volume=False, target_state=None,
+ version='6.2')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -868,9 +869,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
node=None, host=None, reimage_boot_volume=False,
- **rebuild_args)
+ target_state=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2'),
+ mock.call('6.1'),
+ mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
@@ -890,7 +893,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
compute_api.router.client.return_value = mock_client
-        # Force can_send_version to [False, True, True], so that 6.0
+        # Force can_send_version to [False, False, True, True], so that 6.0
# version is used.
- mock_client.can_send_version.side_effect = [False, True, True]
+ mock_client.can_send_version.side_effect = [False, False, True, True]
mock_cctx = mock.MagicMock()
mock_client.prepare.return_value = mock_cctx
rebuild_args = {
@@ -908,12 +911,47 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'limits': None,
'accel_uuids': [],
'reimage_boot_volume': True,
+ 'target_state': None,
}
self.assertRaises(
exception.NovaException, compute_api.rebuild_instance,
ctxt, instance=self.fake_instance_obj,
node=None, host=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.1')])
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2')])
+
+ def test_rebuild_instance_evacuate_old_rpcapi(self):
+        # With rpcapi < 6.2, evacuating to a target state should raise.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to return False.
+ mock_client.can_send_version.return_value = False
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': 'stopped',
+ }
+ self.assertRaises(
+ exception.UnsupportedRPCVersion,
+ compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index c939b927f1..62321bddec 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -209,6 +209,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
+ @mock.patch.object(neutron_api.API, 'unbind_ports')
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
@@ -225,7 +226,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate, mock_get_bdms,
- mock_event, clean_shutdown=True):
+ mock_event, mock_unbind_ports, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
@@ -278,10 +279,13 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance.uuid,
graceful_exit=False)
+ mock_unbind_ports.assert_called_once_with(
+ self.context, mock.ANY, detach=False)
+
return instance
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock())
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@@ -631,7 +635,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request(
self, mock_update_pci, mock_setup_network):
requested_res = [objects.RequestGroup(
@@ -642,7 +646,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(
self.context, instance, image=None,
- filter_properties={}, node='fake-node', request_spec=request_spec,
+ filter_properties={}, node='fakenode2', request_spec=request_spec,
accel_uuids=[])
mock_update_pci.assert_called_once_with(
@@ -655,7 +659,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host',
new=mock.NonCallableMock())
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request_update_raises(
self, mock_update_pci):
requested_res = [objects.RequestGroup(
@@ -696,7 +700,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertRaises(test.TestingException,
self.compute.unshelve_instance, self.context, instance,
image=shelved_image, filter_properties={},
- node='fake-node', request_spec=fake_spec, accel_uuids=[])
+ node='fakenode2', request_spec=fake_spec, accel_uuids=[])
self.assertEqual(instance.image_ref, initial_image_ref)
@mock.patch.object(objects.InstanceList, 'get_by_filters')
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index 848050d769..dd10ecd7df 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -1558,47 +1558,86 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
def test_no_pci_request(self):
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, [], provider_mapping)
- def test_pci_request_from_flavor(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=None)]
+ def test_pci_request_from_flavor_no_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
+ self.context, mock.sentinel.report_client, pci_requests,
+ provider_mapping)
+
+ self.assertNotIn('rp_uuids', req.spec[0])
+
+ def test_pci_request_from_flavor_with_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
+ provider_mapping = {
+ f"{uuids.req1}-0": [uuids.rp1],
+ f"{uuids.req1}-1": [uuids.rp2],
+ }
+
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
+ self.assertEqual(
+ {uuids.rp1, uuids.rp2}, set(req.spec[0]["rp_uuids"].split(','))
+ )
+
def test_pci_request_has_no_mapping(self):
pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_pci_request_ambiguous_mapping(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1, uuids.rp2]}
self.assertRaises(
exception.AmbiguousResourceProviderForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_unexpected_provider_name(self):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = 'unexpected'
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}])]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
+
provider_mapping = {uuids.port_1: [uuids.rp1]}
self.assertRaises(
exception.UnexpectedResourceProviderNameForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, report_client, pci_requests,
provider_mapping)
@@ -1610,11 +1649,14 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = (
'host:agent:enp0s31f6')
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}],)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1]}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, report_client, pci_requests, provider_mapping)
report_client.get_resource_provider_name.assert_called_once_with(
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index de15be28bd..4e888139f6 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -761,7 +761,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_call_livem_checks_on_host')
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index e942217a6c..971570dfb5 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -389,7 +389,8 @@ class _BaseTaskTestCase(object):
'preserve_ephemeral': False,
'host': 'compute-host',
'request_spec': None,
- 'reimage_boot_volume': False}
+ 'reimage_boot_volume': False,
+ 'target_state': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -4751,9 +4752,34 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_evacuate_old_rpc_with_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': 'stopped'})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version', return_value=False):
+ self.assertRaises(exc.UnsupportedRPCVersion,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj, **rebuild_args)
+
+ def test_evacuate_old_rpc_without_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': None})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ can_send_version.assert_has_calls([
+ mock.call('1.25'), mock.call('1.24'),
+ mock.call('1.12')])
+
def test_rebuild_instance_volume_backed(self):
inst_obj = self._create_fake_instance_obj()
- version = '1.24'
+ version = '1.25'
cctxt_mock = mock.MagicMock()
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': inst_obj.host})
@@ -4785,7 +4811,8 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
self.conductor.rebuild_instance,
self.context, inst_obj,
**rebuild_args)
- can_send_version.assert_called_once_with('1.24')
+ can_send_version.assert_has_calls([mock.call('1.25'),
+ mock.call('1.24')])
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
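
The new conductor tests above pin down the negotiation rules for the evacuate
target_state argument: if the client cannot send RPC 1.25 the call must fail
when target_state is set, and otherwise fall back through the older versions.
A minimal sketch of that pattern, with illustrative names rather than Nova's
real API:

class FakeRPCClient:
    def __init__(self, supported_versions):
        self.supported = set(supported_versions)

    def can_send_version(self, version):
        return version in self.supported


def pick_rebuild_version(client, target_state=None):
    # Newest signature first; in this sketch 1.25 is the first version that
    # knows about target_state, so older peers must refuse rather than
    # silently drop it.
    if client.can_send_version('1.25'):
        return '1.25'
    if target_state is not None:
        raise RuntimeError('conductor RPC 1.25 is required for target_state')
    for version in ('1.24', '1.12'):
        if client.can_send_version(version):
            return version
    return '1.12'  # lowest version in this sketch's fallback chain


assert pick_rebuild_version(FakeRPCClient({'1.25'}), 'stopped') == '1.25'
assert pick_rebuild_version(FakeRPCClient(set())) == '1.12'
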
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index fc25bef2bc..639623bbb5 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -635,7 +635,9 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
# now the same url but with extra leading '/' characters removed.
if expected_cpython in errmsg:
location = result[3].decode()
- location = location.removeprefix('Location: ').rstrip('\r\n')
+ if location.startswith('Location: '):
+ location = location[len('Location: '):]
+ location = location.rstrip('\r\n')
self.assertTrue(
location.startswith('/example.com/%2F..'),
msg='Redirect location is not the expected sanitized URL',
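
The websocketproxy hunk above replaces str.removeprefix(), which only exists
on Python 3.9+, with an explicit startswith()/slice so the assertion also runs
on older interpreters. The same idiom in isolation (the helper name is ours):

def strip_prefix(value, prefix):
    # Behaves like str.removeprefix() on Python 3.9+: strip the prefix only
    # when it is actually present, otherwise return the value unchanged.
    if value.startswith(prefix):
        return value[len(prefix):]
    return value


assert strip_prefix('Location: /example.com/%2F..', 'Location: ') == \
    '/example.com/%2F..'
assert strip_prefix('/already/clean', 'Location: ') == '/already/clean'
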
diff --git a/nova/tests/unit/db/api/test_migrations.py b/nova/tests/unit/db/api/test_migrations.py
index 3b9b17aab2..7c99f2f44a 100644
--- a/nova/tests/unit/db/api/test_migrations.py
+++ b/nova/tests/unit/db/api/test_migrations.py
@@ -25,7 +25,6 @@ from unittest import mock
from alembic import command as alembic_api
from alembic import script as alembic_script
-from migrate.versioning import api as migrate_api
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -127,47 +126,6 @@ class TestModelsSyncPostgreSQL(
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
- """Test that the models match the database after old migrations are run."""
-
- def db_sync(self, engine):
- # the 'nova.db.migration.db_sync' method will not use the legacy
- # sqlalchemy-migrate-based migration flow unless the database is
- # already controlled with sqlalchemy-migrate, so we need to manually
- # enable version controlling with this tool to test this code path
- repository = migration._find_migrate_repo(database='api')
- migrate_api.version_control(
- engine, repository, migration.MIGRATE_INIT_VERSION['api'])
-
- # now we can apply migrations as expected and the legacy path will be
- # followed
- super().db_sync(engine)
-
-
-class TestModelsLegacySyncSQLite(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- pass
-
-
-class TestModelsLegacySyncMySQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- FIXTURE = test_fixtures.MySQLOpportunisticFixture
-
-
-class TestModelsLegacySyncPostgreSQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-
-
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
@@ -180,7 +138,7 @@ class NovaMigrationsWalk(
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
- self.init_version = migration.ALEMBIC_INIT_VERSION['api']
+ self.init_version = 'd67eeaabee36'
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
diff --git a/nova/tests/unit/db/main/test_migrations.py b/nova/tests/unit/db/main/test_migrations.py
index d2c4ef9762..579888cfd2 100644
--- a/nova/tests/unit/db/main/test_migrations.py
+++ b/nova/tests/unit/db/main/test_migrations.py
@@ -30,7 +30,6 @@ from unittest import mock
from alembic import command as alembic_api
from alembic import script as alembic_script
import fixtures
-from migrate.versioning import api as migrate_api
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -174,47 +173,6 @@ class TestModelsSyncPostgreSQL(
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
- """Test that the models match the database after old migrations are run."""
-
- def db_sync(self, engine):
- # the 'nova.db.migration.db_sync' method will not use the legacy
- # sqlalchemy-migrate-based migration flow unless the database is
- # already controlled with sqlalchemy-migrate, so we need to manually
- # enable version controlling with this tool to test this code path
- repository = migration._find_migrate_repo(database='main')
- migrate_api.version_control(
- engine, repository, migration.MIGRATE_INIT_VERSION['main'])
-
- # now we can apply migrations as expected and the legacy path will be
- # followed
- super().db_sync(engine)
-
-
-class TestModelsLegacySyncSQLite(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- pass
-
-
-class TestModelsLegacySyncMySQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- FIXTURE = test_fixtures.MySQLOpportunisticFixture
-
-
-class TestModelsLegacySyncPostgreSQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-
-
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
@@ -227,7 +185,7 @@ class NovaMigrationsWalk(
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('main')
- self.init_version = migration.ALEMBIC_INIT_VERSION['main']
+ self.init_version = '8f2f1571d55b'
def assertIndexExists(self, connection, table_name, index):
self.assertTrue(
@@ -314,6 +272,15 @@ class NovaMigrationsWalk(
self.assertIsInstance(
table.c.encryption_options.type, sa.types.String)
+ def _check_960aac0e09ea(self, connection):
+ self.assertIndexNotExists(
+ connection, 'console_auth_tokens',
+ 'console_auth_tokens_token_hash_idx',
+ )
+ self.assertIndexNotExists(
+ connection, 'instances', 'uuid',
+ )
+
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
diff --git a/nova/tests/unit/db/test_migration.py b/nova/tests/unit/db/test_migration.py
index ca86f6347c..17a099a8cc 100644
--- a/nova/tests/unit/db/test_migration.py
+++ b/nova/tests/unit/db/test_migration.py
@@ -12,14 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import glob
-import os
from unittest import mock
import urllib
from alembic.runtime import migration as alembic_migration
-from migrate import exceptions as migrate_exceptions
-from migrate.versioning import api as migrate_api
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
@@ -68,17 +64,9 @@ class TestDBSync(test.NoDBTestCase):
migration.db_sync, '402')
@mock.patch.object(migration, '_upgrade_alembic')
- @mock.patch.object(migration, '_init_alembic_on_legacy_database')
- @mock.patch.object(migration, '_is_database_under_alembic_control')
- @mock.patch.object(migration, '_is_database_under_migrate_control')
@mock.patch.object(migration, '_find_alembic_conf')
- @mock.patch.object(migration, '_find_migrate_repo')
@mock.patch.object(migration, '_get_engine')
- def _test_db_sync(
- self, has_migrate, has_alembic, mock_get_engine, mock_find_repo,
- mock_find_conf, mock_is_migrate, mock_is_alembic, mock_init,
- mock_upgrade,
- ):
+ def test_db_sync(self, mock_get_engine, mock_find_conf, mock_upgrade):
# return an encoded URL to mimic sqlalchemy
mock_get_engine.return_value.url = (
@@ -86,13 +74,10 @@ class TestDBSync(test.NoDBTestCase):
'read_default_file=%2Fetc%2Fmy.cnf.d%2Fnova.cnf'
'&read_default_group=nova'
)
- mock_is_migrate.return_value = has_migrate
- mock_is_alembic.return_value = has_alembic
migration.db_sync()
mock_get_engine.assert_called_once_with('main', context=None)
- mock_find_repo.assert_called_once_with('main')
mock_find_conf.assert_called_once_with('main')
mock_find_conf.return_value.set_main_option.assert_called_once_with(
'sqlalchemy.url',
@@ -100,93 +85,25 @@ class TestDBSync(test.NoDBTestCase):
'read_default_file=%%2Fetc%%2Fmy.cnf.d%%2Fnova.cnf' # ...
'&read_default_group=nova'
)
- mock_is_migrate.assert_called_once_with(
- mock_get_engine.return_value, mock_find_repo.return_value)
-
- if has_migrate:
- mock_is_alembic.assert_called_once_with(
- mock_get_engine.return_value)
- else:
- mock_is_alembic.assert_not_called()
-
- # we should only attempt the upgrade of the remaining
- # sqlalchemy-migrate-based migrations and fake apply of the initial
- # alembic migrations if sqlalchemy-migrate is in place but alembic
- # hasn't been used yet
- if has_migrate and not has_alembic:
- mock_init.assert_called_once_with(
- mock_get_engine.return_value, 'main',
- mock_find_repo.return_value, mock_find_conf.return_value)
- else:
- mock_init.assert_not_called()
- # however, we should always attempt to upgrade the requested migration
- # to alembic
mock_upgrade.assert_called_once_with(
- mock_get_engine.return_value, mock_find_conf.return_value, None)
-
- def test_db_sync_new_deployment(self):
- """Mimic a new deployment without existing sqlalchemy-migrate cruft."""
- has_migrate = False
- has_alembic = False
- self._test_db_sync(has_migrate, has_alembic)
-
- def test_db_sync_with_existing_migrate_database(self):
- """Mimic a deployment currently managed by sqlalchemy-migrate."""
- has_migrate = True
- has_alembic = False
- self._test_db_sync(has_migrate, has_alembic)
-
- def test_db_sync_with_existing_alembic_database(self):
- """Mimic a deployment that's already switched to alembic."""
- has_migrate = True
- has_alembic = True
- self._test_db_sync(has_migrate, has_alembic)
+ mock_get_engine.return_value, mock_find_conf.return_value, None,
+ )
@mock.patch.object(alembic_migration.MigrationContext, 'configure')
-@mock.patch.object(migrate_api, 'db_version')
-@mock.patch.object(migration, '_is_database_under_alembic_control')
-@mock.patch.object(migration, '_is_database_under_migrate_control')
@mock.patch.object(migration, '_get_engine')
-@mock.patch.object(migration, '_find_migrate_repo')
class TestDBVersion(test.NoDBTestCase):
def test_db_version_invalid_database(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
+ self, mock_get_engine, mock_m_context_configure,
):
"""We only have two databases."""
self.assertRaises(
exception.Invalid, migration.db_version, database='invalid')
- def test_db_version_migrate(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
- """Database is controlled by sqlalchemy-migrate."""
- mock_is_migrate.return_value = True
- mock_is_alembic.return_value = False
-
- ret = migration.db_version('main')
- self.assertEqual(mock_migrate_version.return_value, ret)
-
- mock_find_repo.assert_called_once_with('main')
- mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_called_once_with(
- mock_get_engine.return_value, mock_find_repo.return_value)
- mock_m_context_configure.assert_not_called()
-
- def test_db_version_alembic(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
+ def test_db_version(self, mock_get_engine, mock_m_context_configure):
"""Database is controlled by alembic."""
- mock_is_migrate.return_value = False
- mock_is_alembic.return_value = True
-
ret = migration.db_version('main')
mock_m_context = mock_m_context_configure.return_value
self.assertEqual(
@@ -194,31 +111,9 @@ class TestDBVersion(test.NoDBTestCase):
ret
)
- mock_find_repo.assert_called_once_with('main')
mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_not_called()
mock_m_context_configure.assert_called_once()
- def test_db_version_not_controlled(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
- """Database is not controlled."""
- mock_is_migrate.return_value = False
- mock_is_alembic.return_value = False
-
- ret = migration.db_version()
- self.assertIsNone(ret)
-
- mock_find_repo.assert_called_once_with('main')
- mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_not_called()
- mock_m_context_configure.assert_not_called()
-
class TestGetEngine(test.NoDBTestCase):
@@ -237,77 +132,3 @@ class TestGetEngine(test.NoDBTestCase):
engine = migration._get_engine('api')
self.assertEqual('engine', engine)
mock_get_engine.assert_called_once_with()
-
-
-class TestDatabaseUnderVersionControl(test.NoDBTestCase):
-
- @mock.patch.object(migrate_api, 'db_version')
- def test__is_database_under_migrate_control__true(self, mock_db_version):
- ret = migration._is_database_under_migrate_control('engine', 'repo')
- self.assertTrue(ret)
-
- mock_db_version.assert_called_once_with('engine', 'repo')
-
- @mock.patch.object(migrate_api, 'db_version')
- def test__is_database_under_migrate_control__false(self, mock_db_version):
- mock_db_version.side_effect = \
- migrate_exceptions.DatabaseNotControlledError()
-
- ret = migration._is_database_under_migrate_control('engine', 'repo')
- self.assertFalse(ret)
-
- mock_db_version.assert_called_once_with('engine', 'repo')
-
- @mock.patch.object(alembic_migration.MigrationContext, 'configure')
- def test__is_database_under_alembic_control__true(self, mock_configure):
- context = mock_configure.return_value
- context.get_current_revision.return_value = 'foo'
- engine = mock.MagicMock()
-
- ret = migration._is_database_under_alembic_control(engine)
- self.assertTrue(ret)
-
- context.get_current_revision.assert_called_once_with()
-
- @mock.patch.object(alembic_migration.MigrationContext, 'configure')
- def test__is_database_under_alembic_control__false(self, mock_configure):
- context = mock_configure.return_value
- context.get_current_revision.return_value = None
- engine = mock.MagicMock()
-
- ret = migration._is_database_under_alembic_control(engine)
- self.assertFalse(ret)
-
- context.get_current_revision.assert_called_once_with()
-
-
-class ProjectTestCase(test.NoDBTestCase):
-
- def test_no_migrations_have_downgrade(self):
- topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
- # Walk both the nova_api and nova (cell) database migrations.
- includes_downgrade = []
- for directory in (
- os.path.join(topdir, 'db', 'main', 'legacy_migrations'),
- os.path.join(topdir, 'db', 'api', 'legacy_migrations'),
- ):
- py_glob = os.path.join(directory, 'versions', '*.py')
- for path in glob.iglob(py_glob):
- has_upgrade = False
- has_downgrade = False
- with open(path, "r") as f:
- for line in f:
- if 'def upgrade(' in line:
- has_upgrade = True
- if 'def downgrade(' in line:
- has_downgrade = True
-
- if has_upgrade and has_downgrade:
- fname = os.path.basename(path)
- includes_downgrade.append(fname)
-
- helpful_msg = (
- "The following migrations have a downgrade "
- "which is not supported:"
- "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
- self.assertFalse(includes_downgrade, helpful_msg)
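
With the sqlalchemy-migrate branches gone, the version lookup exercised by the
remaining test boils down to reading the current alembic revision from the
engine. Roughly, as a sketch of the alembic API the test mocks rather than
Nova's exact code:

from alembic.runtime import migration as alembic_migration


def current_alembic_revision(engine):
    # Returns the revision recorded in the alembic_version table, or None if
    # the database has never been stamped by alembic.
    with engine.connect() as connection:
        context = alembic_migration.MigrationContext.configure(connection)
        return context.get_current_revision()
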
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index eefa7b974f..9aa970aca1 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -7415,7 +7415,7 @@ class TestAPI(TestAPIBase):
network_id=uuids.network_id, fields='segment_id')
@mock.patch.object(neutronapi, 'get_client')
- def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ def test_get_segment_ids_for_network_with_segments_none(self, mock_client):
subnets = {'subnets': [{'segment_id': None}]}
mocked_client = mock.create_autospec(client.Client)
mock_client.return_value = mocked_client
@@ -7431,6 +7431,22 @@ class TestAPI(TestAPIBase):
network_id=uuids.network_id, fields='segment_id')
@mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ subnets = {'subnets': [{}]}
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
+ mocked_client.list_subnets.return_value = subnets
+ with mock.patch.object(
+ self.api, 'has_segment_extension', return_value=True,
+ ):
+ res = self.api.get_segment_ids_for_network(
+ self.context, uuids.network_id)
+ self.assertEqual([], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
+ mocked_client.list_subnets.assert_called_once_with(
+ network_id=uuids.network_id, fields='segment_id')
+
+ @mock.patch.object(neutronapi, 'get_client')
def test_get_segment_ids_for_network_fails(self, mock_client):
mocked_client = mock.create_autospec(client.Client)
mock_client.return_value = mocked_client
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 7e6894a1cc..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -16,6 +16,7 @@ import copy
from unittest import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
@@ -553,17 +562,15 @@ class _TestComputeNodeObject(object):
def test_update_from_virt_driver_uuid_already_set(self):
"""Tests update_from_virt_driver where the compute node object already
- has a uuid value so the uuid from the virt driver is ignored.
+ has a uuid value so an error is raised.
"""
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
# Emulate the ironic driver which adds a uuid field.
resources['uuid'] = uuidsentinel.node_uuid
compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
- compute.update_from_virt_driver(resources)
- expected = fake_compute_with_resources.obj_clone()
- expected.uuid = uuidsentinel.something_else
- self.assertTrue(base.obj_equal_prims(expected, compute))
+ self.assertRaises(exception.InvalidNodeConfiguration,
+ compute.update_from_virt_driver, resources)
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index d91015a699..58b9859234 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -430,6 +430,62 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
+ def test_from_components_flavor_based_pci_requests(self):
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+ ctxt = context.RequestContext(
+ fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
+ )
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {
+ "id": uuids.image_id,
+ "properties": {"mappings": []},
+ "status": "fake-status",
+ "location": "far-away",
+ }
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {"fake": "property"}
+
+ qos_port_rg = request_spec.RequestGroup()
+ req_lvl_params = request_spec.RequestLevelParams()
+
+ pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "1234", "product_id": "fe12"}],
+ )
+ ]
+ )
+ pci_request_group = request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_1234_FE12": 1},
+ same_provider=True,
+ )
+
+ spec = objects.RequestSpec.from_components(
+ ctxt,
+ instance.uuid,
+ image,
+ flavor,
+ instance.numa_topology,
+ pci_requests,
+ filter_properties,
+ None,
+ instance.availability_zone,
+ port_resource_requests=[qos_port_rg],
+ request_level_params=req_lvl_params,
+ )
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(qos_port_rg, spec.requested_resources[0])
+ self.assertEqual(
+ pci_request_group.obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+ self.assertEqual(req_lvl_params, spec.request_level_params)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
@@ -1054,6 +1110,183 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
pass
+class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_pci_reqs_ignored_if_disabled(self):
+ self.flags(group='filter_scheduler', pci_in_placement=False)
+
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_neutron_based_requests_are_ignored(self):
+ pci_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[],
+ )
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(requests=[pci_req]),
+ )
+ self.assertEqual(
+ objects.InstancePCIRequest.NEUTRON_PORT, pci_req.source
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_rc_from_product_and_vendor(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[{"vendor_id": "fff", "product_id": "dead"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_multi_device_split_to_multiple_groups(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=2,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-1",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_with_rc_and_traits_from_the_pci_req_spec(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "de12",
+ "product_id": "1234",
+ "resource_class": "gpu",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[
+ {
+ "vendor_id": "fff",
+ "product_id": "dead",
+ "traits": "foo,bar,CUSTOM_BLUE",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_GPU": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ # Note that sets would be serialized to tuples by obj_to_primitive in
+ # random order, so we need to match this spec field by field
+ expected = request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ required_traits={"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"},
+ use_same_provider=True,
+ )
+ actual = spec.requested_resources[1]
+ for field in request_spec.RequestGroup.fields.keys():
+ self.assertEqual(getattr(expected, field), getattr(actual, field))
+
+
class TestRequestGroupObject(test.NoDBTestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
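
The request-spec tests above rely on the PCI-in-placement naming rules: each
requested device gets its own request group named '<request_id>-<index>', the
resource class defaults to CUSTOM_PCI_<VENDOR>_<PRODUCT> unless the device
spec names one, and traits are upper-cased and given a CUSTOM_ prefix when
they lack one. A standalone sketch of just those rules (helper names are ours;
standard os-traits/os-resource-classes handling is ignored here):

def pci_resource_class(spec):
    # Prefer an explicit resource_class from the device spec, otherwise
    # derive one from the vendor/product ids, normalised to upper case.
    rc = spec.get('resource_class')
    if rc:
        rc = rc.upper()
        return rc if rc.startswith('CUSTOM_') else 'CUSTOM_' + rc
    return 'CUSTOM_PCI_%s_%s' % (
        spec['vendor_id'].upper(), spec['product_id'].upper())


def pci_traits(spec):
    # Split the comma-separated trait string and normalise each entry.
    traits = set()
    for trait in spec.get('traits', '').split(','):
        trait = trait.strip().upper()
        if not trait:
            continue
        traits.add(trait if trait.startswith('CUSTOM_') else 'CUSTOM_' + trait)
    return traits


assert pci_resource_class(
    {'vendor_id': 'de12', 'product_id': '1234'}) == 'CUSTOM_PCI_DE12_1234'
assert pci_resource_class(
    {'vendor_id': 'x', 'product_id': 'y', 'resource_class': 'gpu'}
) == 'CUSTOM_GPU'
assert pci_traits({'traits': 'foo,bar,CUSTOM_BLUE'}) == {
    'CUSTOM_FOO', 'CUSTOM_BAR', 'CUSTOM_BLUE'}
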
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 7aefbd15fd..4a3f17f6cb 100644
--- a/nova/tests/unit/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
@@ -187,6 +187,21 @@ class PciRequestTestCase(test.NoDBTestCase):
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
+ def test_get_alias_from_config_valid_rc_and_traits(self):
+ fake_alias = jsonutils.dumps({
+ "name": "xxx",
+ "resource_class": "foo",
+ "traits": "bar,baz",
+ })
+ self.flags(alias=[fake_alias], group='pci')
+ aliases = request._get_alias_from_config()
+ self.assertIsNotNone(aliases)
+ self.assertIn("xxx", aliases)
+ self.assertEqual(
+ ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]),
+ aliases["xxx"],
+ )
+
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index ef8eb2b2b8..7eb43a05f4 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -12,11 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
from unittest import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
@@ -107,17 +108,19 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self._create_fake_devs()
def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_remove_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[1]['count'], 1)
+ self.assertEqual(self.pci_stats.pools[1]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
@@ -146,36 +149,36 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
m = self.pci_stats.to_device_pools_obj()
new_stats = stats.PciDeviceStats(objects.NUMATopology(), m)
- self.assertEqual(len(new_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
+ self.assertEqual(len(new_stats.pools), 4)
+ self.assertEqual([d['count'] for d in new_stats], [1, 1, 1, 1])
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_apply_requests(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.pci_stats.apply_requests(pci_requests, {})
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
- pci_requests_multiple)
+ pci_requests_multiple,
+ {},
+ )
def test_support_requests(self):
- self.assertTrue(self.pci_stats.support_requests(pci_requests))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
+ self.assertTrue(self.pci_stats.support_requests(pci_requests, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_failed(self):
self.assertFalse(
- self.pci_stats.support_requests(pci_requests_multiple))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.pci_stats.support_requests(pci_requests_multiple, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_numa(self):
cells = [
@@ -184,14 +187,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_failed(self):
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info(self):
cells = [
@@ -199,12 +206,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'])
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
# 'legacy' is the default numa_policy so the result must be same
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy = fields.PCINUMAAffinityPolicy.LEGACY)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_pci_numa_policy_preferred(self):
# numa node 0 has 2 devices with vendor_id 'v1'
@@ -218,7 +229,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(
numa_policy=fields.PCINUMAAffinityPolicy.PREFERRED)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info_pci_numa_policy_required(self):
# pci device with vendor_id 'v3' has numa_node=None.
@@ -230,7 +243,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy=fields.PCINUMAAffinityPolicy.REQUIRED)
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_filter_pools_for_socket_affinity_no_socket(self):
self.pci_stats.numa_topology = objects.NUMATopology(
@@ -571,7 +586,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'compute_node_id': 1,
'address': '0000:0e:00.1',
'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
'status': 'available',
'request_id': None,
'dev_type': fields.PciDeviceType.SRIOV_VF,
@@ -599,35 +614,68 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.assertEqual(v, pool[k])
def _assertPools(self):
+ nr_tagged = len(self.pci_tagged_devices)
+ nr_untagged = len(self.pci_untagged_devices)
+ nr_remote = len(self.remote_managed_netdevs)
+ nr_local = len(self.locally_managed_netdevs)
+ self.assertEqual(
+ nr_tagged + nr_untagged + nr_remote + nr_local,
+ len(self.pci_stats.pools),
+ )
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # 5 pools with the second one having the tag 'physical_network'
- # and the value 'physnet1' and multiple pools for testing
- # variations of explicit/implicit remote_managed tagging.
- self.assertEqual(5, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e',
- len(self.remote_managed_netdevs),
- remote_managed='true')
- self.assertEqual(self.remote_managed_netdevs,
- self.pci_stats.pools[2]['devices'])
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[0]],
- self.pci_stats.pools[3]['devices'])
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[1]],
- self.pci_stats.pools[4]['devices'])
+ # they are also part of the keys.
+
+ # 3 pools for the pci_untagged_devices
+ devs = []
+ j = 0
+ for i in range(j, j + nr_untagged):
+ self._assertPoolContent(self.pci_stats.pools[i], '1137', '0072', 1)
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_untagged_devices, devs)
+ j += nr_untagged
+
+    # 4 pools for the pci_tagged_devices
+ devs = []
+ for i in range(j, j + nr_tagged):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "1137",
+ "0071",
+ 1,
+ physical_network="physnet1",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_tagged_devices, devs)
+ j += nr_tagged
+
+ # one with remote_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_remote):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101e",
+ 1,
+ remote_managed="true",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.remote_managed_netdevs, devs)
+ j += nr_remote
+
+ # two with locally_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_local):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101c",
+ 1,
+ remote_managed="false",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.locally_managed_netdevs, devs)
+ j += nr_local
def test_add_devices(self):
self._create_pci_devices()
@@ -650,20 +698,30 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
PCI_REMOTE_MANAGED_TAG: 'False'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
PCI_REMOTE_MANAGED_TAG: 'False'}])]
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(5, len(devs))
- self.assertEqual(set(['0071', '0072', '1018', '101e', '101c']),
+ self.assertEqual(set(['0071', '0072', '101e', '101c']),
set([dev.product_id for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 0)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0072', 1)
+ self._assertPoolContent(self.pci_stats.pools[2], '1137', '0072', 1)
+
+ self._assertPoolContent(self.pci_stats.pools[3], '1137', '0071', 0,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[4], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[6], '1137', '0071', 1,
physical_network='physnet1')
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e', 0,
+
+ self._assertPoolContent(self.pci_stats.pools[7], '15b3', '101e', 0,
remote_managed='true')
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 0,
+ self._assertPoolContent(self.pci_stats.pools[8], '15b3', '101c', 0,
remote_managed='false')
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 0,
+ self._assertPoolContent(self.pci_stats.pools[9], '15b3', '101c', 0,
remote_managed='false')
def test_add_device_no_devspec(self):
@@ -706,30 +764,754 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device(dev2)
self._assertPools()
- def test_update_device(self):
- # Update device type of one of the device from type-PCI to
+ def test_update_device_splits_the_pool(self):
+        # Update device type of one of the devices from type-VF to
# type-PF. Verify if the existing pool is updated and a new
# pool is created with dev_type type-PF.
- self._create_pci_devices()
- dev1 = self.pci_tagged_devices.pop()
- dev1.dev_type = 'type-PF'
- self.pci_stats.update_device(dev1)
- self.assertEqual(6, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071',
- 1,
- physical_network='physnet1',
- remote_managed='false')
- self.assertEqual(dev1,
- self.pci_stats.pools[5]['devices'][0])
+ vfs = []
+ for i in range(3):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="1137",
+ product_id="0071",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(3, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+
+ dev = vfs.pop()
+ dev.dev_type = 'type-PF'
+ dev.parent_addr = None
+ self.pci_stats.update_device(dev)
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(1, self.pci_stats.pools[1]["count"])
+ self.assertEqual([dev], self.pci_stats.pools[1]["devices"])
+
+ def test_only_vfs_from_the_same_parent_are_pooled(self):
+ pf1_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ pf1_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ pf2_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0b:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0b:01.0",
+ numa_node=0
+ )
+ pf2_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(pf1_vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[1]["count"])
+ self.assertEqual(pf2_vfs, self.pci_stats.pools[1]["devices"])
+
+
+class PciDeviceStatsPlacementSupportTestCase(test.NoDBTestCase):
+
+ def test_device_spec_rc_and_traits_ignored_during_pooling(self):
+ """Assert that resource_class and traits from the device spec are not
+        used as a discriminator for pool creation.
+ """
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "resource_class": "foo",
+ "address": "*:81:00.1",
+ "traits": "gold",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "resource_class": "baar",
+ "address": "*:81:00.2",
+ "traits": "silver",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+        # the two devices are matched by different device_specs with
+        # different resource_class and traits fields
+ pci_stats.add_device(pci_dev1)
+ pci_stats.add_device(pci_dev2)
+
+ # but they are put in the same pool as all the other fields are
+ # matching
+ self.assertEqual(1, len(pci_stats.pools))
+ self.assertEqual(2, pci_stats.pools[0]["count"])
+
+ def test_filter_pools_for_spec_ignores_rc_and_traits_in_spec(self):
+ """Assert that resource_class and traits are ignored in the pci
+        request spec when matching the request to pools.
+ """
+ pci_stats = stats.PciDeviceStats(objects.NUMATopology())
+ pools = [{"vendor_id": "dead", "product_id": "beef"}]
+
+ matching_pools = pci_stats._filter_pools_for_spec(
+ pools=pools,
+ request=objects.InstancePCIRequest(
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "resource_class": "foo",
+ "traits": "blue",
+ }
+ ]
+ ),
+ )
+
+ self.assertEqual(pools, matching_pools)
+
+ def test_populate_pools_metadata_from_assigned_devices(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp1}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertEqual(uuids.rp1, pci_stats.pools[0]['rp_uuid'])
+
+ def test_populate_pools_metadata_from_assigned_devices_device_without_rp(
+ self
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertNotIn('rp_uuid', pci_stats.pools[0])
+
+ def test_populate_pools_metadata_from_assigned_devices_multiple_rp(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp2}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ self.assertRaises(
+ ValueError,
+ pci_stats.populate_pools_metadata_from_assigned_devices,
+ )
+
+
+class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ # for simplicity accept any devices
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "*:*:*.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ self.dev_filter = whitelist.Whitelist(device_spec)
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ # add devices represented by different RPs in placement
+ # two VFs on the same PF
+ self.vf1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.vf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(self.vf1)
+ self.vf1.extra_info = {'rp_uuid': uuids.pf1}
+ self.pci_stats.add_device(self.vf2)
+ self.vf2.extra_info = {'rp_uuid': uuids.pf1}
+        # two PFs pf2 and pf3 (pf1 is used for the parent of the above VFs)
+ self.pf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:82:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf2)
+ self.pf2.extra_info = {'rp_uuid': uuids.pf2}
+
+ self.pf3 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:83:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf3)
+ self.pf3.extra_info = {'rp_uuid': uuids.pf3}
+ # a PCI
+ self.pci1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:84:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PCI",
+ )
+ self.pci_stats.add_device(self.pci1)
+ self.pci1.extra_info = {'rp_uuid': uuids.pci1}
+
+ # populate the RP -> pool mapping from the devices to its pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+        # we have 1 pool for the two VFs; each remaining device has its own
+        # pool
+ self.num_pools = 4
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 5
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_unrestricted(self):
+ reqs = []
+ for dev_type in ["type-VF", "type-PF", "type-PCI"]:
+ req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": dev_type,
+ }
+ ],
+ )
+ reqs.append(req)
+
+ # an empty mapping means unrestricted by any provider
+ # we have devs for all type so each request should fit
+ self.assertTrue(self.pci_stats.support_requests(reqs, {}))
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the same request to consume the pools
+ self.pci_stats.apply_requests(reqs, {})
+        # we have consumed 3 devs (a VF, a PF, and a PCI)
+ self.assertEqual(
+ self.num_devs - 3,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # the empty pools are purged. We have one pool for the remaining VF
+        # and one for the remaining PF
+ self.assertEqual(2, len(self.pci_stats.pools))
+
+ def test_support_request_restricted_by_provider_mapping(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # simulate the placement restricted the possible RPs to pf3
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+ )
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the request and see if the right device is consumed
+ self.pci_stats.apply_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools any more
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_request_restricted_by_provider_mapping_does_not_fit(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect the request to fail
+ self.assertFalse(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and the pools are not changed
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_neutron_port_based_request_ignore_mapping(self):
+ # by not having the alias_name set this becomes a neutron port based
+ # PCI request
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect that the placement selection is ignored for neutron port
+ # based requests so this request should fit as we have PFs in the pools
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.pci_stats.apply_requests(
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and a PF is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+
+ def test_support_request_req_with_count_2(self):
+ # now ask for two PFs in a single request
+ pf_req = objects.InstancePCIRequest(
+ count=2,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both PF reqs
+ mapping = {
+ f"{uuids.req1}-0": [uuids.pf2],
+ f"{uuids.req1}-1": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(self.pci_stats.support_requests([pf_req], mapping))
+ self.pci_stats.apply_requests([pf_req], mapping)
+ # and both PFs are consumed
+ self.assertEqual(self.num_pools - 2, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_requests_multiple_reqs(self):
+ # request both a VF and a PF
+ vf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.pf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both reqs
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.pf_req}-0": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req, pf_req], mapping)
+ )
+ self.pci_stats.apply_requests([vf_req, pf_req], mapping)
+ # and the proper devices are consumed
+ # Note that the VF pool still has a device so it remains
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_apply_gets_requested_uuids_from_pci_req(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+                    # Simulate that the scheduler already allocated a
+                    # candidate and stored the mapping in the request.
+                    # The allocation restricts us to consuming from
+                    # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+        # call apply with None mapping, signalling that the allocation is
+        # already done and the resulting mapping is stored in the request
+ self.pci_stats.apply_requests([pf_req], provider_mapping=None)
+
+ # assert that the right device is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def _create_two_pools_with_two_vfs(self):
+ # create two pools (PFs) with two VFs each
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ for pf_index in [1, 2]:
+ for vf_index in [1, 2]:
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address=f"0000:81:0{pf_index}.{vf_index}",
+ parent_addr=f"0000:81:0{pf_index}.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(dev)
+ dev.extra_info = {'rp_uuid': getattr(uuids, f"pf{pf_index}")}
+
+ # populate the RP -> pool mapping from the devices to its pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+        # we have 2 pools and 4 devs in total
+ self.num_pools = 2
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 4
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_apply_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate where 1 VF
+ # is consumed from PF1 and two from PF2
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.vf_req}-1": [uuids.pf2],
+ f"{uuids.vf_req}-2": [uuids.pf2],
+ }
+ # This should fit
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req], mapping)
+ )
+ # and when the request is consumed, the pool consumption should be in sync
+ # with the placement allocation. So the PF2 pool is expected to
+ # disappear as it is fully consumed and the PF1 pool should have
+ # one free device.
+ self.pci_stats.apply_requests([vf_req], mapping)
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(uuids.pf1, self.pci_stats.pools[0]['rp_uuid'])
+ self.assertEqual(1, self.pci_stats.pools[0]['count'])
+
+ def test_consume_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # In placement 1 VF is allocated from PF1 and two from PF2
+ "rp_uuids": ",".join([uuids.pf1, uuids.pf2, uuids.pf2])
+ }
+ ],
+ )
+
+ # So when the PCI claim consumes devices based on this request we
+ # expect that nova follows what is allocated in placement.
+ devs = self.pci_stats.consume_requests([vf_req])
+ self.assertEqual(
+ {"0000:81:01.0": 1, "0000:81:02.0": 2},
+ collections.Counter(dev.parent_addr for dev in devs),
+ )
+
+ def test_consume_restricted_by_allocation(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts that we can only consume from
+ # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # Call consume. It always expects the allocated mapping to be stored
+ # in the PCI request as it is always called from the compute side.
+ consumed_devs = self.pci_stats.consume_requests([pf_req])
+ # assert that the right device is consumed
+ self.assertEqual([self.pf3], consumed_devs)
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {
+ pool["rp_uuid"]
+ for pool in self.pci_stats.pools
+ if pool["count"] > 0
+ },
+ )
class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index 68a051b26c..7490441d92 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -58,6 +58,16 @@ class BasePolicyTest(test.TestCase):
def setUp(self):
super(BasePolicyTest, self).setUp()
+ # TODO(gmann): enforce_scope and enforce_new_defaults are enabled
+ # by default in the code so disable them in the base test class while
+ # we still have deprecated rules and their tests. We have enforce_scope
+ # and no-legacy tests which are explicitly enabling scope and new
+ # defaults to test the new defaults and scope. In future, once
+ # we remove the deprecated rules, along with refactoring the unit
+ # tests we can remove overriding the oslo policy flags.
+ self.flags(enforce_scope=False, group="oslo_policy")
+ if not self.without_deprecated_rules:
+ self.flags(enforce_new_defaults=False, group="oslo_policy")
self.useFixture(fixtures.NeutronFixture(self))
self.policy = self.useFixture(fixtures.RealPolicyFixture())
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index ddc8241003..b9e4c29dba 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -103,7 +103,7 @@ class EvacuatePolicyTest(base.BasePolicyTest):
evacuate_mock.assert_called_once_with(
self.user_req.environ['nova.context'],
mock.ANY, 'my-host', False,
- 'MyNewPass', None)
+ 'MyNewPass', None, None)
class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index 658c82c20e..f5dcf87e4a 100644
--- a/nova/tests/unit/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -34,6 +34,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
@@ -46,6 +47,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
index 0ebe95d5e4..ba9073e0df 100644
--- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -11,6 +11,7 @@
# under the License.
import itertools
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -53,7 +54,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
@@ -132,7 +135,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
- 'ram_allocation_ratio': 1.3})
+ 'ram_allocation_ratio': 1.3,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
@@ -180,7 +185,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': numa_topology,
'pci_stats': None,
'cpu_allocation_ratio': 1,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
assertion = self.assertTrue if passes else self.assertFalse
# test combinations of image properties and extra specs
@@ -237,7 +244,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
@@ -287,7 +296,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': host_topology,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
def test_numa_topology_filter_pass_networks(self):
host = self._get_fake_host_state_with_networks()
@@ -329,3 +340,79 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
network_metadata=network_metadata)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filters_candidates(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 3 candidates for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+ # and that from those candidates only the second matches the numa logic
+ mock_numa_fit.side_effect = [False, True, False]
+
+ # run the filter and expect that the host passes as it has at least
+ # one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ # also assert that the filter checked all three candidates
+ self.assertEqual(3, len(mock_numa_fit.mock_calls))
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filter_fails_if_no_matching_candidate_left(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 1 candidate for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+ # simulate that the only candidate we have does not match
+ mock_numa_fit.side_effect = [False]
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(1, len(mock_numa_fit.mock_calls))
+ # and also that it emptied the candidate list in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
index edd9735b34..27d80b884e 100644
--- a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -12,6 +12,8 @@
from unittest import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import objects
from nova.pci import stats
from nova.scheduler.filters import pci_passthrough_filter
@@ -33,11 +35,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_fail(self):
pci_stats_mock = mock.MagicMock()
@@ -47,11 +54,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_no_pci_request(self):
spec_obj = objects.RequestSpec(pci_requests=None)
@@ -82,3 +94,92 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
host = fakes.FakeHostState('host1', 'node1',
attribute_dict={'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ def test_filters_candidates(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that only the second allocation candidate fits
+ pci_stats_mock.support_requests.side_effect = [False, True, False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned 3 possible candidates
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it passes the host as there is at
+ # least one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked all three candidates
+ pci_stats_mock.support_requests.assert_has_calls(
+ [
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_2"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_3"]},
+ ),
+ ]
+ )
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ def test_filter_fails_if_no_matching_candidate_left(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that the only candidate we have does not match
+ pci_stats_mock.support_requests.side_effect = [False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned a single candidate
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked our candidate
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ )
+ # and also that it emptied the candidate list in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index c4445d5578..1a7daa515f 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -1562,10 +1562,14 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
- numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
- fake_numa_topology,
- limits=None, pci_requests=None,
- pci_stats=None)
+ numa_fit_mock.assert_called_once_with(
+ fake_host_numa_topology,
+ fake_numa_topology,
+ limits=None,
+ pci_requests=None,
+ pci_stats=None,
+ provider_mapping=None,
+ )
numa_usage_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 9356292918..e992fe6034 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -19,6 +19,7 @@ Tests For Scheduler
from unittest import mock
+from keystoneauth1 import exceptions as ks_exc
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -26,6 +27,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova.scheduler import utils as scheduler_utils
@@ -396,9 +398,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, aggregates=[])
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -459,20 +468,29 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=group)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
+ instance_group=group,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ limits={},
+ cell_uuid=uuids.cell,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
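+ # simulate that every host passes the filtering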
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.manager._schedule(ctx, spec_obj,
- instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
- mock.sentinel.provider_summaries)
+ instance_uuids, None, mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -510,14 +528,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance]
@@ -583,11 +611,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
project_id=uuids.project_id,
instance_group=None)
- host_state = mock.Mock(spec=host_manager.HostState,
- host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host=mock.sentinel.host,
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
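+ # simulate that every host passes the filtering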
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = False
instance_uuids = [uuids.instance]
@@ -604,7 +637,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
- mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
+ mock_get_hosts.assert_called_once_with(spec_obj, mock.ANY, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.manager.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
@@ -635,18 +668,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, updated='fake')
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ updated="fake",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [
- all_host_states, # first instance: return all the hosts (only one)
- [], # second: act as if no more hosts that meet criteria
- all_host_states, # the final call when creating alternates
- ]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ # first instance: return all the hosts (only one)
+ if c == 0:
+ return hosts
+ # second: act as if no more hosts meet the criteria
+ elif c == 1:
+ return []
+ # the final call when creating alternates
+ elif c == 2:
+ return hosts
+ else:
+ raise StopIteration()
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
@@ -679,20 +735,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -744,20 +824,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -814,17 +918,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=ig, instance_uuid=uuids.instance0)
+ instance_group=ig,
+ instance_uuid=uuids.instance0,
+ requested_resources=[],
+ )
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
- nodename="node1", limits={}, uuid=uuids.cn1,
- cell_uuid=uuids.cell1, instances={}, aggregates=[])
- hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
- nodename="node2", limits={}, uuid=uuids.cn2,
- cell_uuid=uuids.cell2, instances={}, aggregates=[])
+ hs1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host1",
+ nodename="node1",
+ limits={},
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ hs2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host2",
+ nodename="node2",
+ limits={},
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell2,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
@@ -838,13 +961,18 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# _get_sorted_hosts() in the two iterations for each instance in
# num_instances
visited_instances = set([])
+ get_sorted_hosts_called_with_host_states = []
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
- return [hs1, hs2]
- return [hs2, hs1]
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return s
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return reversed(s)
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
@@ -871,10 +999,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
- mock.call(spec_obj, all_host_states, 0),
- mock.call(spec_obj, [hs2, hs1], 1),
+ mock.call(spec_obj, mock.ANY, 0),
+ mock.call(spec_obj, mock.ANY, 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
+ self.assertEqual(
+ all_host_states, get_sorted_hosts_called_with_host_states[0])
+ self.assertEqual(
+ [hs1], get_sorted_hosts_called_with_host_states[1])
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
@@ -1168,14 +1300,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, updated="Not None")
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ updated="Not None",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [all_host_states, []]
+
+ calls = []
+
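+ # replace the original side_effect list: return the only host for the
+ # first instance and nothing for the second to simulate that no more
+ # hosts meet the criteria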
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return []
+ else:
+ raise StopIteration
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
@@ -1204,7 +1358,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
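+ # simulate that every host passes the filtering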
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1212,14 +1366,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1270,11 +1424,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
+
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
- mock_sorted.side_effect = [all_host_states,
- list(reversed(all_host_states)),
- all_host_states]
+ calls = []
+
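+ # mirror the removed side_effect list: all hosts for the first
+ # instance, reversed order for the second, and all hosts again when
+ # picking alternates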
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return list(reversed(all_host_states))
+ elif c == 2:
+ return list(hosts)
+ else:
+ raise StopIteration()
+
+ mock_sorted.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1282,14 +1449,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1323,7 +1490,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
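+ # simulate that every host passes the filtering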
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
@@ -1331,14 +1498,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1521,3 +1688,541 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager._discover_hosts_in_cells(mock.sentinel.context)
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch.object(manager, 'LOG')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client(self, mock_rpc, mock_sg, mock_hm,
+ mock_log, mock_report):
+ # Simulate keystone or placement being offline at startup
+ mock_report.side_effect = ks_exc.RequestTimeout
+ mgr = manager.SchedulerManager()
+ mock_report.assert_called_once_with()
+ self.assertTrue(mock_log.warning.called)
+
+ # Make sure we're raising the actual error to subsequent callers
+ self.assertRaises(ks_exc.RequestTimeout, lambda: mgr.placement_client)
+
+ # Simulate recovery of the keystone or placement service
+ mock_report.reset_mock(side_effect=True)
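+ # accessing the lazy property again should retry creating the client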
+ mgr.placement_client
+ mock_report.assert_called_once_with()
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client_failures(self, mock_rpc, mock_sg,
+ mock_hm, mock_report):
+ # Certain keystoneclient exceptions are fatal
+ mock_report.side_effect = ks_exc.Unauthorized
+ self.assertRaises(ks_exc.Unauthorized, manager.SchedulerManager)
+
+ # Anything else is fatal
+ mock_report.side_effect = test.TestingException
+ self.assertRaises(test.TestingException, manager.SchedulerManager)
+
+
+class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
+
+ class ACRecorderFilter(filters.BaseHostFilter):
+ """A filter that records what allocation candidates it saw on each host
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.seen_candidates = []
+
+ def host_passes(self, host_state, filter_properties):
+ # record what candidate the filter saw for each host
+ self.seen_candidates.append(list(host_state.allocation_candidates))
+ return True
+
+ class DropFirstFilter(filters.BaseHostFilter):
+ """A filter that removes one candidate and keeps the rest on each
+ host
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates.pop(0)
+ return bool(host_state.allocation_candidates)
+
+ @mock.patch.object(
+ host_manager.HostManager, '_init_instance_info', new=mock.Mock())
+ @mock.patch.object(
+ host_manager.HostManager, '_init_aggregates', new=mock.Mock())
+ def setUp(self):
+ super().setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.manager = manager.SchedulerManager()
+ self.manager.host_manager.weighers = []
+ self.request_spec = objects.RequestSpec(
+ ignore_hosts=[],
+ force_hosts=[],
+ force_nodes=[],
+ requested_resources=[],
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_see_allocation_candidates_for_each_host(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # have a single filter configured where we can assert that the filter
+ # sees the allocation_candidates of each host
+ filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [filter]
+
+ instance_uuids = [uuids.inst1]
+
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts with different candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ mock.sentinel.host1_a_c_1,
+ mock.sentinel.host1_a_c_2,
+ ]
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ mock.sentinel.host2_a_c_1,
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ mock.sentinel.allocation_request_version,
+ )
+
+ # we expect that our filter has seen the allocation candidate list of
+ # each host respectively
+ self.assertEqual(
+ [
+ alloc_reqs_by_rp_uuid[uuids.host1],
+ alloc_reqs_by_rp_uuid[uuids.host2],
+ ],
+ filter.seen_candidates,
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_scheduler_selects_filtered_a_c_from_hosts_state(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ """Assert that if a filter removes an allocation candidate from a host
+ then even if that host is selected the removed allocation candidate
+ is not used by the scheduler.
+ """
+
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we have requested one instance to be scheduled so expect one set
+ # of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(
+ "host1-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consecutive_filter_sees_filtered_a_c_list(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # create two filters
+ # 1) DropFirstFilter runs first and drops the first candidate from each
+ # host
+ # 2) ACRecorderFilter runs next and records what candidates it saw
+ recorder_filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [
+ self.DropFirstFilter(),
+ recorder_filter,
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ # we expect that the second filter saw one host with one candidate, as
+ # candidate1 was already filtered out by the run of the first filter
+ self.assertEqual(
+ [["host1-candidate2"]],
+ recorder_filter.seen_candidates
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_removes_all_a_c_host_is_not_selected(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ ]
+ # second with two candidates
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ "host2-candidate1",
+ "host2-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we expect that the first host is not selected as the filter
+ # removed every candidate from the host
+ # also we expect that on the second host only candidate2 could have
+ # been selected
+ # we asked for one instance, so we expect one set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(uuids.host2, selection.compute_node_uuid)
+ self.assertEqual(
+ "host2-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consume_selected_host_sees_updated_request_spec(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # simulate that nothing is filtered out, by not having any filters
+ self.manager.host_manager.enabled_filters = []
+
+ # set up the request spec with a request group to be updated
+ # by the selected candidate
+ self.request_spec.requested_resources = [
+ objects.RequestGroup(
+ requester_id=uuids.group_req1, provider_uuids=[]
+ )
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have single host with a single candidate
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ # simulate that placement fulfilled the above RequestGroup from
+ # a certain child RP of the host.
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child_rp],
+ }
+ }
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ # make asserts on the request_spec passed to consume
+ def assert_request_spec_updated_with_selected_candidate(
+ selected_host, spec_obj, instance_uuid=None
+ ):
+ # we expect that the scheduler updated the request_spec based on
+ # the selected candidate before calling consume
+ self.assertEqual(
+ [uuids.host1_child_rp],
+ spec_obj.requested_resources[0].provider_uuids,
+ )
+
+ mock_consume.side_effect = (
+ assert_request_spec_updated_with_selected_candidate)
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ mock_consume.assert_called_once()
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_main_selection_with_claimed_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that the first (a.k.a main) selection returned for an
+ instance always maps to the allocation candidate that was claimed by
+ the scheduler in placement.
+ """
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have one host with 3 candidates each fulfilling a request group
+ # from a different child RP
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ # This is odd but the unnamed request group uses "" as the
+ # name of the group.
+ "": [uuids.host1],
+ uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
+ }
+ } for i in [1, 2, 3]
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ self.assertEqual(uuids.host1, selection.compute_node_uuid)
+ # we expect that host1_child2 candidate is selected as the
+ # DropFirstFilter will drop host1_child1
+ expected_a_c = {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child2],
+ }
+ }
+ self.assertEqual(
+ expected_a_c,
+ jsonutils.loads(selection.allocation_request),
+ )
+ # and we expect that the same candidate was claimed in placement
+ mock_claim.assert_called_once_with(
+ mock.ANY,
+ self.manager.placement_client,
+ self.request_spec,
+ uuids.inst1,
+ expected_a_c,
+ allocation_request_version="fake-alloc-req-version",
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_alts_with_filtered_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that alternate generation also works based on filtered
+ candidates.
+ """
+
+ class RPFilter(filters.BaseHostFilter):
+ """A filter that only allows candidates with specific RPs"""
+
+ def __init__(self, allowed_rp_uuids):
+ self.allowed_rp_uuids = allowed_rp_uuids
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates = [
+ a_c
+ for a_c in host_state.allocation_candidates
+ if a_c["mappings"][uuids.group_req1][0]
+ in self.allowed_rp_uuids
+ ]
+ return True
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have 3 hosts each with 2 allocation candidates fulfilling a request
+ # group from a different child RP
+ hosts = []
+ for i in [1, 2, 3]:
+ host = host_manager.HostState(f"host{i}", f"node{i}", uuids.cell1)
+ host.uuid = getattr(uuids, f"host{i}")
+ alloc_reqs_by_rp_uuid[host.uuid] = [
+ {
+ "mappings": {
+ "": [host.uuid],
+ uuids.group_req1: [
+ getattr(uuids, f"host{i}_child{j}")
+ ],
+ }
+ }
+ for j in [1, 2]
+ ]
+ hosts.append(host)
+ mock_get_all_host_states.return_value = iter(hosts)
+
+ # configure a filter that only "likes" host1_child2 and host3_child2
+ # RPs. This means host2 is totally out and host1 and host3 only have
+ # one viable candidate
+ self.manager.host_manager.enabled_filters = [
+ RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
+ ]
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we expect a main selection (host1) and a single alternate (host3);
+ # on both selections we expect child2 as the selected candidate
+ self.assertEqual(2, len(selections))
+ main_selection = selections[0]
+ self.assertEqual(uuids.host1, main_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host1_child2],
+ jsonutils.loads(main_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
+
+ alt_selection = selections[1]
+ self.assertEqual(uuids.host3, alt_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host3_child2],
+ jsonutils.loads(alt_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
diff --git a/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
new file mode 100644
index 0000000000..c6e4abd4cd
--- /dev/null
+++ b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler hypervisor version weights.
+"""
+
+from nova.scheduler import weights
+from nova.scheduler.weights import hypervisor_version
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HypervisorVersionWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weighers = [hypervisor_version.HypervisorVersionWeigher()]
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weighers,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ host_values = [
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ]
+ return [fakes.FakeHostState(host, node, values)
+ for host, node, values in host_values]
+
+ def test_multiplier_default(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_default_full_ordering(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_hosts = self.weight_handler.get_weighed_objects(
+ self.weighers, hostinfo_list, {}
+ )
+ expected_hosts = [fakes.FakeHostState(host, node, values)
+ for host, node, values in [
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ]]
+ for actual, expected in zip(
+ weighed_hosts,
+ expected_hosts
+ ):
+ self.assertEqual(actual.obj.host, expected.host)
+
+ def test_multiplier_none(self):
+ multi = 0.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
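+ # a zero multiplier zeroes out every weight, effectively disabling
+ # the weigher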
+ self.assertEqual(multi, weighed_host.weight)
+
+ def test_multiplier_positive(self):
+ multi = 2.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * multi, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_negative(self):
+ multi = -1.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
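+ # a negative multiplier inverts the preference so the host with the
+ # lowest hypervisor version comes out on top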
+ self.assertEqual('host1', weighed_host.obj.host)
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index 10b2a79db4..41cbada99f 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -1043,3 +1043,24 @@ class HackingTestCase(test.NoDBTestCase):
import unittest.mock
"""
self._assert_has_no_errors(code, checks.import_stock_mock)
+
+ def test_check_set_daemon(self):
+ code = """
+ self.setDaemon(True)
+ worker.setDaemon(True)
+ self._event_thread.setDaemon(True)
+ mythread.setDaemon(False)
+ self.thread.setDaemon(1)
+ """
+ errors = [(x + 1, 0, 'N372') for x in range(5)]
+ self._assert_has_errors(
+ code, checks.check_set_daemon, expected_errors=errors)
+
+ code = """
+ self.setDaemon = True
+ worker.setDaemonFlag(True)
+ self._event_thread.resetDaemon(True)
+ self.set.Daemon(True)
+ self.thread.setdaemon(True)
+ """
+ self._assert_has_no_errors(code, checks.check_set_daemon)
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index 871e836d87..752b872381 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -303,10 +303,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
- self.non_admin_context = context.RequestContext('fake', 'fake',
- roles=['member'])
- self.admin_context = context.RequestContext('fake', 'fake', True,
- roles=['admin', 'member'])
+ self.non_admin_context = context.RequestContext(
+ 'fake', 'fake', roles=['member', 'reader'])
+ self.admin_context = context.RequestContext(
+ 'fake', 'fake', True, roles=['admin', 'member', 'reader'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
@@ -387,6 +387,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-hypervisors:search",
"os_compute_api:os-hypervisors:servers",
"os_compute_api:limits:other_project",
+"os_compute_api:os-flavor-access",
)
self.admin_or_owner_rules = (
@@ -440,7 +441,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete:restore",
"os_compute_api:os-deferred-delete:force",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ips:add",
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index 3fe56013bd..40a914b5f7 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -214,20 +214,20 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client(self, mock_get, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -253,21 +253,21 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client_profiler_enabled(self, mock_client, mock_ser,
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -432,11 +432,11 @@ class TestProfilerRequestContextSerializer(test.NoDBTestCase):
class TestClientRouter(test.NoDBTestCase):
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
@@ -444,7 +444,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
# verify a client was created by ClientRouter
- mock_rpcclient.assert_called_once_with(
+ mock_get.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
@@ -452,11 +452,11 @@ class TestClientRouter(test.NoDBTestCase):
# verify cell client was returned
self.assertEqual(cell_client, client)
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance_untargeted(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance_untargeted(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
@@ -464,7 +464,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
- self.assertFalse(mock_rpcclient.called)
+ self.assertFalse(mock_get.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
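
The TestRPC and TestClientRouter changes above follow oslo.messaging's switch from constructing RPCClient directly to the get_rpc_client() factory. A small usage sketch, assuming only the factory signature the mocks assert against:

    import oslo_messaging as messaging


    def build_compute_client(transport, serializer=None, version_cap='1.0'):
        # The factory mirrors the old RPCClient constructor arguments but
        # lets oslo.messaging pick the concrete client implementation.
        target = messaging.Target(topic='compute', server='host1')
        return messaging.get_rpc_client(
            transport, target,
            version_cap=version_cap,
            call_monitor_timeout=None,
            serializer=serializer)
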
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index 9fb6fa1c40..acc1aeca7f 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -128,7 +128,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
# init_host is called before any service record is created
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host, self.binary)
mock_create.assert_called_once_with()
@@ -186,7 +186,7 @@ class ServiceTestCase(test.NoDBTestCase):
mock_create.side_effect = ex
serv.manager = mock_manager
self.assertRaises(test.TestingException, serv.start)
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(None)
mock_get_by_host_and_binary.assert_has_calls([
mock.call(mock.ANY, self.host, self.binary),
mock.call(mock.ANY, self.host, self.binary)])
@@ -216,7 +216,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host,
self.binary)
@@ -241,7 +241,8 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(
+ mock_svc_get_by_host_and_binary.return_value)
serv.stop()
serv.manager.cleanup_host.assert_called_with()
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 62005de525..135558e145 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -40,6 +40,7 @@ class FakeMount(object):
class APITestCase(test.NoDBTestCase):
+ @mock.patch('nova.virt.disk.vfs.guestfs.VFSGuestFS', new=mock.Mock())
def test_can_resize_need_fs_type_specified(self):
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 6ac7ca464e..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -935,6 +935,48 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected = {
'CUSTOM_IRON_NFV': {
'total': 1,
+ 'reserved': 1,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ mock_nfc.assert_called_once_with(mock.sentinel.nodename)
+ mock_nr.assert_called_once_with(mock_nfc.return_value)
+ mock_res_used.assert_called_once_with(mock_nfc.return_value)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
+ result = self.ptree.data(mock.sentinel.nodename).inventory
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_used', return_value=True)
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable', return_value=False)
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
+ def test_update_provider_tree_with_rc_occupied_workaround(self,
+ mock_nfc, mock_nr, mock_res_unavail, mock_res_used):
+ """Ensure that when a node is used, we report the inventory matching
+ the consumed resources.
+ """
+ self.flags(skip_reserve_in_use_ironic_nodes=True,
+ group="workarounds")
+ mock_nr.return_value = {
+ 'vcpus': 24,
+ 'vcpus_used': 24,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 1024,
+ 'local_gb': 100,
+ 'local_gb_used': 100,
+ 'resource_class': 'iron-nfv',
+ }
+
+ self.driver.update_provider_tree(self.ptree, mock.sentinel.nodename)
+
+ expected = {
+ 'CUSTOM_IRON_NFV': {
+ 'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
@@ -945,7 +987,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).inventory
self.assertEqual(expected, result)
@@ -1016,7 +1058,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(), result)
@@ -1048,7 +1090,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(traits), result)
@@ -2500,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2532,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2540,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
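
test_prepare_for_spawn_not_available above verifies that a node which is not in a deployable provision state is rejected with ComputeResourcesUnavailable before any update is attempted. A hedged sketch of that kind of guard, assuming NOSTATE is also acceptable for legacy nodes and ignoring everything else prepare_for_spawn does:

    from nova import exception
    from nova.virt.ironic import ironic_states


    def check_node_deployable(node):
        # Reject nodes that already host an instance or are not ready to
        # start a deployment (e.g. CLEANWAIT in the test above).
        if (node.instance_uuid is not None
                or node.provision_state not in (
                    ironic_states.AVAILABLE, ironic_states.NOSTATE)):
            raise exception.ComputeResourcesUnavailable(
                reason='node %s is not available for deployment' % node.uuid)
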
diff --git a/nova/db/api/legacy_migrations/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/db/api/legacy_migrations/__init__.py
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..b5bcb762f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ # Create a fake instance with two pinned CPUs, only one of which is in
+ # the dedicated set
+ numa_topology = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(cpu_pinning_raw={'0': '0', '2': '2'}),
+ ])
+ self.fake_inst = objects.Instance(numa_topology=numa_topology)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_online(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not in the dedicated set
+ # As a reminder, core(i).online = True calls set_online(i)
+ mock_online.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_up_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not in the dedicated set
+ # As a reminder, core(i).set_high_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'performance')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped(self, mock_online):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_up(self.fake_inst)
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped_if_standard_instance(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_up(objects.Instance(numa_topology=None))
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not in the dedicated set
+ # As a reminder, core(i).online = False calls set_offline(i)
+ mock_offline.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not in the dedicated set
+ # As a reminder, core(i).set_low_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'powersave')
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down(self.fake_inst)
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped_if_standard_instance(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_down(objects.Instance(numa_topology=None))
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_offline.assert_has_calls([mock.call(0), mock.call(1)])
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_all_dedicated_cpus_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs have their governor set to powersave
+ mock_set_governor.assert_has_calls([mock.call(0, 'powersave'),
+ mock.call(1, 'powersave')])
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down_all_dedicated_cpus()
+ mock_offline.assert_not_called()
+
+ def test_power_down_all_dedicated_cpus_wrong_config(self):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set=None, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.power_down_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_governor(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ mock_get_governor.return_value = 'performance'
+ mock_get_online.side_effect = (True, False)
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_cpu_state(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ mock_get_online.return_value = True
+ mock_get_governor.side_effect = ('powersave', 'performance')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
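
TestAPI above establishes the guard conditions for the CPU power-management helpers: nothing is touched unless [libvirt]cpu_power_management is enabled, the instance actually has pinned CPUs, and the core belongs to [compute]cpu_dedicated_set. A hedged sketch of the power_up flow those assertions imply, assuming InstanceNUMATopology.cpu_pinning exposes the pinned host CPU ids and with the Core wrapper passed in to keep the sketch self-contained:

    import nova.conf
    from nova.virt import hardware

    CONF = nova.conf.CONF


    def power_up(instance, core_cls):
        if not CONF.libvirt.cpu_power_management:
            return
        if instance.numa_topology is None:
            return
        dedicated = hardware.parse_cpu_spec(CONF.compute.cpu_dedicated_set)
        # Only pinned host CPUs that are part of the dedicated set are
        # powered up; core #2 in the tests above is therefore skipped.
        for pcpu in instance.numa_topology.cpu_pinning & dedicated:
            if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
                core_cls(pcpu).online = True
            else:
                core_cls(pcpu).set_high_governor()
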
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
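
TestCoreHelpers above captures a subtle point: get_online() treats a missing 'online' sysfs file as online, since cpu0 usually cannot be offlined and exposes no such knob. A hedged sketch of the helpers, assuming the conventional sysfs layout behind CPU_PATH_TEMPLATE:

    import os

    from nova import exception
    from nova import filesystem

    CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'


    def gen_cpu_path(core_id):
        return CPU_PATH_TEMPLATE % {'core': core_id}


    def get_online(core_id):
        try:
            online = filesystem.read_sys(
                os.path.join(gen_cpu_path(core_id), 'online')).strip()
            return online == '1'
        except exception.FileNotFound:
            # cpu0 has no 'online' file and is always online.
            return True


    def set_online(core_id):
        filesystem.write_sys(
            os.path.join(gen_cpu_path(core_id), 'online'), data='1')
        return get_online(core_id)
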
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 4a8aa027a9..3d0b5ae685 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -1537,7 +1537,7 @@ class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
- def test_config_graphics(self):
+ def test_config_graphics_vnc(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
@@ -1549,6 +1549,30 @@ class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
+ def test_config_graphics_spice(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "spice"
+ obj.autoport = False
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ obj.image_compression = "auto_glz"
+ obj.jpeg_compression = "auto"
+ obj.zlib_compression = "always"
+ obj.playback_compression = True
+ obj.streaming_mode = "filter"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="spice" autoport="no" keymap="en_US" listen="127.0.0.1">
+ <image compression="auto_glz"/>
+ <jpeg compression="auto"/>
+ <zlib compression="always"/>
+ <playback compression="on"/>
+ <streaming mode="filter"/>
+ </graphics>
+ """)
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
@@ -1591,7 +1615,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
</hostdev>
"""
- def test_config_guest_hosdev_pci(self):
+ def test_config_guest_hostdev_pci(self):
hostdev = config.LibvirtConfigGuestHostdevPCI()
hostdev.domain = "1234"
hostdev.bus = "11"
@@ -1600,7 +1624,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
xml = hostdev.to_xml()
self.assertXmlEqual(self.expected, xml)
- def test_parse_guest_hosdev_pci(self):
+ def test_parse_guest_hostdev_pci(self):
xmldoc = self.expected
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
@@ -1612,7 +1636,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
self.assertEqual(obj.slot, '0x22')
self.assertEqual(obj.function, '0x3')
- def test_parse_guest_hosdev_usb(self):
+ def test_parse_guest_hostdev_usb(self):
xmldoc = """<hostdev mode='subsystem' type='usb'>
<source startupPolicy='optional'>
<vendor id='0x1234'/>
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 1c5f79dc89..66dbf795d8 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -740,6 +740,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
imagebackend.Image._get_driver_format)
self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
+ self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
@@ -817,6 +818,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Driver capabilities for 'supports_socket_pci_numa_affinity' "
"is invalid",
)
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption'],
+ "Driver capabilities for 'supports_ephemeral_encryption' "
+ "is invalid",
+ )
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption_luks'],
+ "Driver capabilities for 'supports_ephemeral_encryption_luks' "
+ " is invalid",
+ )
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
@@ -1320,7 +1331,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_model(self, mocked_compare):
mocked_compare.side_effect = (2, 0)
self.flags(cpu_mode="custom",
@@ -1333,6 +1345,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_register_all_undefined_instance_details',
new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ def test__check_cpu_compatibility_skip_compare_at_init(
+ self, mocked_compare
+ ):
+ self.flags(group='workarounds', skip_cpu_compare_at_startup=True)
+ self.flags(cpu_mode="custom",
+ cpu_models=["Icelake-Server-noTSX"],
+ cpu_model_extra_flags = ["-mpx"],
+ group="libvirt")
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+ mocked_compare.assert_not_called()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_with_flag(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1341,9 +1369,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["qemu64"],
cpu_model_extra_flags = ["avx", "avx2"],
@@ -1352,11 +1381,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
# here, and in the surrounding similar tests, the non-zero error
# code in the compareCPU() side effect indicates error
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["Broadwell-noTSX"],
cpu_model_extra_flags = ["a v x"],
@@ -1365,11 +1395,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_enabled_and_disabled_flags(
self, mocked_compare
):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(
cpu_mode="custom",
cpu_models=["Cascadelake-Server"],
@@ -1822,6 +1853,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_guest.set_user_password.assert_called_once_with("root", "123")
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ def test_qemu_announce_self(self, mock_get_guest):
+ # Enable the workaround, configure to call announce_self 3 times
+ self.flags(enable_qemu_monitor_announce_self=True, group='workarounds')
+
+ mock_guest = mock.Mock(spec=libvirt_guest.Guest)
+ mock_get_guest.return_value = mock_guest
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._qemu_monitor_announce_self(mock_guest)
+
+ # Ensure that 3 calls are made as defined by option
+ # enable_qemu_monitor_announce_self_retries default of 3
+ mock_guest.announce_self.assert_any_call()
+ self.assertEqual(3, mock_guest.announce_self.call_count)
+
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@@ -3047,9 +3094,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'fake-flavor', 'fake-image-meta').obj_to_primitive())
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fits(self, is_able):
+ def test_get_guest_config_numa_host_instance_fits(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3087,9 +3132,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
@@ -3356,7 +3399,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(
"Memory encryption requested by hw:mem_encryption extra spec in "
"m1.fake flavor but image fake_image doesn't have "
- "'hw_firmware_type' property set to 'uefi'", str(exc))
+ "'hw_firmware_type' property set to 'uefi' or volume-backed "
+ "instance was requested", str(exc))
def test_sev_enabled_host_extra_spec_no_machine_type(self):
exc = self.assertRaises(exception.InvalidMachineType,
@@ -3516,10 +3560,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
host_topology, inst_topology, numa_tune)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_pci_no_numa_info(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
@@ -3573,10 +3614,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_2pci_no_fit(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3693,10 +3731,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
group='compute')
@@ -3735,10 +3770,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_non_numa_host_instance_topo(
- self, is_able):
+ def test_get_guest_config_non_numa_host_instance_topo(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
@@ -3786,10 +3818,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
numa_cfg_cell.memory)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_topo(
- self, is_able):
+ def test_get_guest_config_numa_host_instance_topo(self):
self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
group='compute')
@@ -5793,6 +5822,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'vnc')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
@@ -5823,6 +5857,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, 'vnc')
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
def test_get_guest_config_with_spice_and_tablet(self):
@@ -5859,6 +5898,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'spice')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@@ -5918,8 +5962,57 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[3].type, 'spicevmc')
self.assertEqual(cfg.devices[4].type, "spice")
+ self.assertIsNone(cfg.devices[4].image_compression)
+ self.assertIsNone(cfg.devices[4].jpeg_compression)
+ self.assertIsNone(cfg.devices[4].zlib_compression)
+ self.assertIsNone(cfg.devices[4].playback_compression)
+ self.assertIsNone(cfg.devices[4].streaming_mode)
self.assertEqual(cfg.devices[5].type, video_type)
+ def test_get_guest_config_with_spice_compression(self):
+ self.flags(enabled=False, group='vnc')
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ image_compression='auto_lz',
+ jpeg_compression='never',
+ zlib_compression='always',
+ playback_compression=False,
+ streaming_mode='all',
+ server_listen='10.0.0.1',
+ group='spice')
+ self.flags(pointer_model='usbtablet')
+
+ cfg = self._get_guest_config_with_graphics()
+
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestUSBHostController)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, 'spice')
+ self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
+ self.assertEqual(cfg.devices[3].image_compression, 'auto_lz')
+ self.assertEqual(cfg.devices[3].jpeg_compression, 'never')
+ self.assertEqual(cfg.devices[3].zlib_compression, 'always')
+ self.assertFalse(cfg.devices[3].playback_compression)
+ self.assertEqual(cfg.devices[3].streaming_mode, 'all')
+
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@@ -7199,9 +7292,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
[],
image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_cpu_quota(self, is_able):
+ def test_get_guest_config_with_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7537,9 +7628,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(images_type='rbd', group='libvirt')
self._test_get_guest_config_disk_cachemodes('rbd')
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
+ def test_get_guest_config_with_bogus_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7557,9 +7646,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=False)
- def test_get_update_guest_cputune(self, is_able):
+ def test_get_update_guest_cputune(self):
+ # No CPU controller on the host
+ self.cgroups.version = 0
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -9144,6 +9234,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([0, 1, 2, 3]))
+ def test_get_pcpu_available_for_power_mgmt(self, get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set and power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='2-3', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ pcpus = drvr._get_pcpu_available()
+ self.assertEqual(set([2, 3]), pcpus)
+
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([4, 5]))
+ def test_get_pcpu_available__cpu_dedicated_set_invalid_for_pm(self,
+ get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set but it's invalid with power management set.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='4-6', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set([0, 1, 2, 3]))
def test_get_vcpu_available(self, get_online_cpus):
@@ -11146,7 +11264,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -11185,7 +11303,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file,
):
@@ -11225,7 +11343,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -11263,7 +11381,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file,
):
@@ -11295,7 +11413,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU',
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file,
@@ -11403,7 +11521,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file,
):
@@ -11444,7 +11562,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file,
):
@@ -11470,7 +11588,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(return_value.dst_wants_file_backed_memory)
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
@@ -11506,7 +11624,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for vif in result.vifs:
self.assertTrue(vif.supports_os_vif_delegation)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
@@ -11516,7 +11634,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
@@ -11553,7 +11671,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_AARCH64_CPU_COMPARE))
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
def test_compare_cpu_host_aarch64(self,
mock_compare,
mock_get_libversion,
@@ -11576,7 +11694,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_compare.assert_called_once_with(caps.host.cpu.to_xml())
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
@@ -11595,7 +11713,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
@@ -11607,7 +11725,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
jsonutils.dumps(_fake_cpu_info),
instance)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
@@ -14038,6 +14156,85 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main_monitoring_failed(self):
self._test_live_migration_main(mon_side_effect=Exception)
+ @mock.patch.object(host.Host, "get_connection", new=mock.Mock())
+ @mock.patch.object(utils, "spawn", new=mock.Mock())
+ @mock.patch.object(host.Host, "get_guest")
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths")
+ def _test_live_migration_monitor_job_stats_exception(
+ self, exc, mock_copy_disk_paths, mock_get_guest, expect_success=True
+ ):
+ # Verify behavior when various exceptions are raised inside of
+ # Guest.get_job_info() during live migration monitoring.
+ mock_domain = mock.Mock(fakelibvirt.virDomain)
+ guest = libvirt_guest.Guest(mock_domain)
+ mock_get_guest.return_value = guest
+
+ # First, raise the exception from jobStats(), then return "completed"
+ # to make sure we exit the monitoring loop.
+ guest._domain.jobStats.side_effect = [
+ exc,
+ {'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED},
+ ]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ post_method = mock.Mock()
+ migrate_data = mock.Mock()
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_paths.return_value = disks_to_copy
+
+ func = drvr._live_migration
+ args = (self.context, instance, mock.sentinel.dest, post_method,
+ mock.sentinel.recover_method, mock.sentinel.block_migration,
+ migrate_data)
+
+ if expect_success:
+ func(*args)
+ post_method.assert_called_once_with(
+ self.context, instance, mock.sentinel.dest,
+ mock.sentinel.block_migration, migrate_data
+ )
+ else:
+ actual_exc = self.assertRaises(
+ fakelibvirt.libvirtError, func, *args)
+ self.assertEqual(exc, actual_exc)
+
+ def test_live_migration_monitor_job_stats_no_domain(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'no domain',
+ error_code=fakelibvirt.VIR_ERR_NO_DOMAIN
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_op_invalid(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'operation invalid',
+ error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_no_ram_info_set(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'internal error',
+ error_message='migration was active, but no RAM info was set',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_internal_error(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ 'some other internal error',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=False)
+
@mock.patch('os.path.exists', return_value=False)
@mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -22130,6 +22327,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.flags(sysinfo_serial="none", group="libvirt")
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
os_vif.initialize()
self.drvr = libvirt_driver.LibvirtDriver(
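
The _test_live_migration_monitor_job_stats_exception helper above separates libvirt errors the monitoring loop should tolerate (the domain is already gone, or libvirt briefly reports missing RAM info) from errors that must abort the migration. A hedged sketch of that classification, not the driver's actual code path:

    import libvirt

    _IGNORABLE_MSG = 'migration was active, but no RAM info was set'


    def is_ignorable_jobstats_error(exc):
        code = exc.get_error_code()
        if code in (libvirt.VIR_ERR_NO_DOMAIN,
                    libvirt.VIR_ERR_OPERATION_INVALID):
            # The domain has already disappeared, e.g. the migration just
            # finished; keep polling and let the job report completion.
            return True
        if (code == libvirt.VIR_ERR_INTERNAL_ERROR and
                _IGNORABLE_MSG in (exc.get_error_message() or '')):
            # Transient race in libvirt's stats reporting; retry.
            return True
        return False
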
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 3afd6c139d..a76dc83105 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -1052,6 +1052,12 @@ Active: 8381604 kB
'iowait': 6121490000000},
stats)
+ @mock.patch.object(fakelibvirt.virConnect, "getCPUMap")
+ def test_get_available_cpus(self, mock_map):
+ mock_map.return_value = (4, [True, True, False, False], None)
+ result = self.host.get_available_cpus()
+ self.assertEqual(result, {0, 1, 2, 3})
+
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
fake_dom_xml = """
@@ -1613,25 +1619,59 @@ Active: 8381604 kB
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
- def test_is_cpu_control_policy_capable_ok(self):
+ def test_is_cpu_control_policy_capable_via_neither(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=0))
+ self.assertFalse(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv1(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=1))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv2(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=2))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_has_cgroupsv1_cpu_controller_ok(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertTrue(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv1_cpu_controller())
- def test_is_cpu_control_policy_capable_ko(self):
+ def test_has_cgroupsv1_cpu_controller_ko(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
- @mock.patch('builtins.open', side_effect=IOError)
- def test_is_cpu_control_policy_capable_ioerror(self, mock_open):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv1_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ok(self):
+ m = mock.mock_open(
+ read_data="cpuset cpu io memory hugetlb pids rdma misc"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv2_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ko(self):
+ m = mock.mock_open(
+ read_data="memory pids"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
+
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv2_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
def test_get_canonical_machine_type(self):
# this test relies on configuration from the FakeLibvirtFixture
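
The reworked cgroup tests above split CPU-controller detection by cgroup version: v1 is derived from the mount options in /proc/mounts, v2 from the controllers advertised by the unified hierarchy. A hedged sketch of both probes, assuming the conventional file locations:

    def has_cgroupsv1_cpu_controller(mounts_path='/proc/mounts'):
        try:
            with open(mounts_path) as fp:
                for line in fp:
                    # fields: device mountpoint fstype options dump pass
                    fields = line.split()
                    if len(fields) >= 4 and 'cpu' in fields[3].split(','):
                        return True
        except IOError:
            pass
        return False


    def has_cgroupsv2_cpu_controller(
            controllers_path='/sys/fs/cgroup/cgroup.controllers'):
        try:
            with open(controllers_path) as fp:
                return 'cpu' in fp.read().split()
        except IOError:
            return False
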
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 0b80bde49f..c648108f56 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -103,19 +103,23 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
+ @mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
def _test_create_image(
self, path, disk_format, disk_size, mock_info, mock_execute,
- backing_file=None
+ mock_ntf, backing_file=None, encryption=None
):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
cluster_size=mock.sentinel.cluster_size,
)
+ fh = mock_ntf.return_value.__enter__.return_value
libvirt_utils.create_image(
- path, disk_format, disk_size, backing_file=backing_file)
+ path, disk_format, disk_size, backing_file=backing_file,
+ encryption=encryption,
+ )
cow_opts = []
@@ -130,9 +134,32 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
f'cluster_size={mock.sentinel.cluster_size}',
]
+ encryption_opts = []
+
+ if encryption:
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={fh.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
expected_args = (
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
- disk_format, *cow_opts, path,
+ disk_format, *cow_opts, *encryption_opts, path,
)
if disk_size is not None:
expected_args += (disk_size,)
@@ -159,6 +186,16 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
backing_file=mock.sentinel.backing_file,
)
+ def test_create_image_encryption(self):
+ encryption = {
+ 'secret': 'a_secret',
+ 'format': 'luks',
+ }
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ encryption=encryption,
+ )
+
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
'default_eph_format': None,
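
To make the encryption-options hunk above concrete, the updated test effectively asserts that create_image builds a qemu-img invocation along these lines (a sketch only; the secret file path comes from the mocked NamedTemporaryFile and is shown here as a placeholder):

    expected_cmd = (
        'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', 'qcow2',
        '--object', 'secret,id=sec,file=/tmp/placeholder-secret',
        '-o', 'encrypt.key-secret=sec',
        '-o', 'encrypt.format=luks',
        '-o', 'encrypt.cipher-alg=aes-256',
        '-o', 'encrypt.cipher-mode=xts',
        '-o', 'encrypt.hash-alg=sha256',
        '-o', 'encrypt.iter-time=2000',
        '-o', 'encrypt.ivgen-alg=plain64',
        '-o', 'encrypt.ivgen-hash-alg=sha256',
        '/some/stuff', '1234567891234',
    )
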
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 26ec198f08..ab51a3e26c 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -2023,6 +2023,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=0),
@@ -2036,6 +2037,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=1,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=64),
@@ -2049,6 +2051,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=2,
cpu_usage=0,
memory_usage=0,
+ socket=2,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=16)],
@@ -2130,6 +2133,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=160,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=32),
@@ -2170,6 +2174,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2181,6 +2186,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2192,6 +2198,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2258,6 +2265,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=2,
memory_usage=512,
+ socket=0,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
siblings=[set([0]), set([1]), set([2]), set([3])],
@@ -2269,6 +2277,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=1,
memory_usage=512,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2280,6 +2289,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2330,6 +2340,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=512, used=128,
@@ -2342,6 +2353,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=1048576, total=5, used=2,
@@ -2606,6 +2618,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2616,6 +2629,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2638,45 +2652,45 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
def test_get_fitting_success_no_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3)
+ self.host, self.instance3, {})
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
def test_get_fitting_success_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance.cells[0].id)
def test_get_fitting_fails_no_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance)
def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance2)
def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
self.assertEqual(2, fitted_instance2.cells[0].id)
@@ -2691,7 +2705,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
mock_supports.assert_called_once_with(
@@ -2708,7 +2722,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsNone(fitted_instance)
mock_supports.assert_has_calls([
@@ -2725,6 +2739,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
'support_requests', return_value= True):
fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsInstance(fitted_instance1,
@@ -2740,6 +2755,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsNone(fitted_instance1)
@@ -2758,7 +2774,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# ...therefore an instance without a PCI device should get host cell 2
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
# TODO(sfinucan): We should be comparing this against the HOST cell
self.assertEqual(2, instance_topology.cells[0].id)
@@ -2768,7 +2784,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# ...therefore an instance without a PCI device should get host cell 1
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
self.assertEqual(1, instance_topology.cells[0].id)
@@ -3895,7 +3911,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3933,7 +3949,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(1,))
@@ -3971,7 +3987,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fit(self):
@@ -4014,7 +4030,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -4069,7 +4085,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
@@ -4114,7 +4130,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
]
)
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
@@ -4148,7 +4164,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([4, 5]), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_cpu_pinning_usage_from_instances(self):
@@ -4160,6 +4176,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4189,6 +4206,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 3]),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4218,6 +4236,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4246,6 +4265,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4272,6 +4292,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4298,6 +4319,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4324,6 +4346,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4353,6 +4376,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4383,6 +4407,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2, 6, 7]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4415,6 +4440,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
cpu_usage=2,
memory_usage=0,
pinned_cpus=set(),
+ socket=0,
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)]
@@ -4448,6 +4474,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
@@ -4490,6 +4517,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4529,6 +4557,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 5, 6, 7]),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4764,6 +4793,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1])],
mempages=[objects.NUMAPagesTopology(
@@ -4775,6 +4805,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4788,7 +4819,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4802,7 +4833,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4816,7 +4847,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4830,7 +4861,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1, 2, 4]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_multi_nodes_isolate(self):
@@ -4847,7 +4878,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2}, inst_topo.cells[1].cpu_pinning)
@@ -4867,7 +4898,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
# The guest NUMA node 0 is requesting 2pCPUs + 1 additional
# pCPU for emulator threads, the host can't handle the
# request.
@@ -4887,7 +4918,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1, 2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2, 2: 3}, inst_topo.cells[1].cpu_pinning)
@@ -4962,7 +4993,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0, 1: 2}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([4]), inst_topo.cells[0].cpuset_reserved)
@@ -4992,7 +5023,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5021,7 +5052,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
if policy:
inst_topo.emulator_threads_policy = policy
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
return inst_topo
def test_mixed_instance_not_define(self):
@@ -5078,7 +5109,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 3}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5107,7 +5138,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5362,7 +5393,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
expected_error = (
"Memory encryption requested by %(requesters)s but image "
"%(image_name)s doesn't have 'hw_firmware_type' property "
- "set to 'uefi'"
+ "set to 'uefi' or volume-backed instance was requested"
)
def _test_encrypted_memory_support_no_uefi(self, enc_extra_spec,
@@ -5489,6 +5520,25 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
(self.flavor_name, self.image_id)
)
+ def test_encrypted_memory_support_flavor_for_volume(self):
+ extra_specs = {'hw:mem_encryption': True}
+
+ flavor = objects.Flavor(name=self.flavor_name,
+ extra_specs=extra_specs)
+ # Following image_meta is typical for root Cinder volume
+ image_meta = objects.ImageMeta.from_dict({
+ 'min_disk': 0,
+ 'min_ram': 0,
+ 'properties': {},
+ 'size': 0,
+ 'status': 'active'})
+ # Confirm that exception.FlavorImageConflict is raised when
+ # flavor with hw:mem_encryption flag is used to create
+ # volume-backed instance
+ self.assertRaises(exception.FlavorImageConflict,
+ hw.get_mem_encryption_constraint, flavor,
+ image_meta)
+
class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
@@ -5896,7 +5946,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
def test_sort_host_numa_cell_num_equal_instance_cell_num(self):
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance0)
+ self.host, self.instance0, {})
self.assertInstanceNUMAcellOrder([0, 1, 2, 3], instance_topology)
def test_sort_no_pci_stats_no_shared_cpu_policy(self):
@@ -5905,14 +5955,14 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
True,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance2)
+ self.host, self.instance2, {})
self.assertInstanceNUMAcellOrder([0, 1, 3], instance_topology)
CONF.set_override(
'packing_host_numa_cells_allocation_strategy',
False,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance2)
+ self.host, self.instance2, {})
self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
def test_sort_no_pci_stats_shared_cpu_policy(self):
@@ -5921,14 +5971,14 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
True,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertInstanceNUMAcellOrder([0, 1, 2], instance_topology)
CONF.set_override(
'packing_host_numa_cells_allocation_strategy',
False,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertInstanceNUMAcellOrder([3, 1, 2], instance_topology)
def test_sort_pci_stats_pci_req_no_shared_cpu_policy(self):
@@ -5941,6 +5991,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
pci_reqs = [pci_request]
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 0, 3], instance_topology)
@@ -5950,6 +6001,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 2, 3], instance_topology)
@@ -5964,6 +6016,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
pci_reqs = [pci_request]
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 0, 2], instance_topology)
@@ -5973,6 +6026,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 3, 2], instance_topology)
@@ -5984,6 +6038,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([0, 3, 2], instance_topology)
CONF.set_override(
@@ -5992,6 +6047,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
@@ -6002,6 +6058,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([0, 2, 3], instance_topology)
CONF.set_override(
@@ -6010,5 +6067,6 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([3, 2, 0], instance_topology)
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 58581d93ba..62a61c1e8b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -16,6 +16,8 @@ import os
from unittest import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
diff --git a/nova/tests/unit/virt/test_node.py b/nova/tests/unit/virt/test_node.py
new file mode 100644
index 0000000000..668b762520
--- /dev/null
+++ b/nova/tests/unit/virt/test_node.py
@@ -0,0 +1,142 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils.fixture import uuidsentinel as uuids
+import testtools
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.virt import node
+
+CONF = cfg.CONF
+
+
+# NOTE(danms): We do not inherit from test.TestCase because we need
+# our node methods not stubbed out in order to exercise them.
+class TestNodeIdentity(testtools.TestCase):
+ def flags(self, **kw):
+ """Override flag variables for a test."""
+ group = kw.pop('group', None)
+ for k, v in kw.items():
+ CONF.set_override(k, v, group)
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.identity_file = os.path.join(self.tempdir, node.COMPUTE_ID_FILE)
+ self.fake_config_files = ['%s/etc/nova.conf' % self.tempdir,
+ '%s/etc/nova/nova.conf' % self.tempdir,
+ '%s/opt/etc/nova/nova.conf' % self.tempdir]
+ for fn in self.fake_config_files:
+ os.makedirs(os.path.dirname(fn))
+ self.flags(state_path=self.tempdir,
+ config_file=self.fake_config_files)
+ node.LOCAL_NODE_UUID = None
+
+ def test_generate_local_node_uuid(self):
+ node_uuid = uuids.node
+ node.write_local_node_uuid(node_uuid)
+
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'anything')
+ self.assertIn(
+ 'Identity file %s appeared unexpectedly' % self.identity_file,
+ str(e))
+
+ def test_generate_local_node_uuid_unexpected_open_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_open.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_generate_local_node_uuid_unexpected_write_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_write = mock_open.return_value.__enter__.return_value.write
+ mock_write.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_get_local_node_uuid_simple_exists(self):
+ node_uuid = uuids.node
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_exists_whitespace(self):
+ node_uuid = uuids.node
+ # Make sure we strip whitespace from the file contents
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ ' %s \n' % node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_generate(self):
+ self.assertIsNone(node.LOCAL_NODE_UUID)
+ node_uuid1 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid1, node.LOCAL_NODE_UUID)
+ node_uuid2 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid2, node.LOCAL_NODE_UUID)
+
+ # Make sure we got the same thing each time, and that it's a
+ # valid uuid. Since we provided no uuid, it must have been
+ # generated the first time and read/returned the second.
+ self.assertEqual(node_uuid1, node_uuid2)
+ uuid.UUID(node_uuid1)
+
+ # Try to read it directly to make sure the file was really
+ # created and with the right value.
+ self.assertEqual(node_uuid1, node.read_local_node_uuid())
+
+ def test_get_local_node_uuid_two(self):
+ node_uuid = uuids.node
+
+ # Write the uuid to two of our locations
+ for cf in (self.fake_config_files[0], self.fake_config_files[1]):
+ open(os.path.join(os.path.dirname(cf),
+ node.COMPUTE_ID_FILE), 'w').write(node_uuid)
+
+ # Make sure we got the expected uuid and that no exceptions
+ # were raised about the files disagreeing
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_two_mismatch(self):
+ node_uuids = [uuids.node1, uuids.node2]
+
+ # Write a different uuid to each file
+ for id, fn in zip(node_uuids, self.fake_config_files):
+ open(os.path.join(
+ os.path.dirname(fn),
+ node.COMPUTE_ID_FILE), 'w').write(id)
+
+ # Make sure we get an error that identifies the mismatching
+ # file with its uuid, as well as what we expected to find
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.get_local_node_uuid)
+ expected = ('UUID %s in %s does not match %s' % (
+ node_uuids[1],
+ os.path.join(os.path.dirname(self.fake_config_files[1]),
+ 'compute_id'),
+ node_uuids[0]))
+ self.assertIn(expected, str(e))
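
A hedged usage sketch of the identity helpers these tests drive, assuming the same fixture setup as TestNodeIdentity: the uuid appears to be read from a compute_id file next to each configured nova.conf (all copies found must agree) and generated and persisted under state_path when absent.

    from nova.virt import node

    node.LOCAL_NODE_UUID = None
    first = node.get_local_node_uuid()   # generated and persisted on first use
    second = node.get_local_node_uuid()  # served from the module-level cache
    assert first == second == node.read_local_node_uuid()
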
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 58fa3d4c27..ed9f1e3822 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -832,6 +832,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
# When destroying an instance, os-vif will try to execute some commands
# which hang tests so let's just stub out the unplug call to os-vif
# since we don't care about it.
diff --git a/nova/utils.py b/nova/utils.py
index 664056a09f..b5d45c58b5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -632,15 +632,13 @@ def _serialize_profile_info():
return trace_info
-def spawn(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn.
-
- This utility exists so that it can be stubbed for testing without
- interfering with the service spawns.
+def pass_context(runner, func, *args, **kwargs):
+ """Generalised passthrough method
- It will also grab the context from the threadlocal store and add it to
- the store on the new thread. This allows for continuity in logging the
- context when using this method to spawn a new thread.
+ It will grab the context from the threadlocal store and add it to
+ the store on the runner. This allows for continuity in logging the
+ context when using this method to spawn a new thread through the
+ runner function.
"""
_context = common_context.get_current()
profiler_info = _serialize_profile_info()
@@ -655,11 +653,11 @@ def spawn(func, *args, **kwargs):
profiler.init(**profiler_info)
return func(*args, **kwargs)
- return eventlet.spawn(context_wrapper, *args, **kwargs)
+ return runner(context_wrapper, *args, **kwargs)
-def spawn_n(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn_n.
+def spawn(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
@@ -668,25 +666,26 @@ def spawn_n(func, *args, **kwargs):
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
- _context = common_context.get_current()
- profiler_info = _serialize_profile_info()
- @functools.wraps(func)
- def context_wrapper(*args, **kwargs):
- # NOTE: If update_store is not called after spawn_n it won't be
- # available for the logger to pull from threadlocal storage.
- if _context is not None:
- _context.update_store()
- if profiler_info and profiler:
- profiler.init(**profiler_info)
- func(*args, **kwargs)
+ return pass_context(eventlet.spawn, func, *args, **kwargs)
+
+
+def spawn_n(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn_n.
+
+ This utility exists so that it can be stubbed for testing without
+ interfering with the service spawns.
- eventlet.spawn_n(context_wrapper, *args, **kwargs)
+ It will also grab the context from the threadlocal store and add it to
+ the store on the new thread. This allows for continuity in logging the
+ context when using this method to spawn a new thread.
+ """
+ pass_context(eventlet.spawn_n, func, *args, **kwargs)
def tpool_execute(func, *args, **kwargs):
"""Run func in a native thread"""
- tpool.execute(func, *args, **kwargs)
+ return pass_context(tpool.execute, func, *args, **kwargs)
def is_none_string(val):
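
A brief usage sketch of the consolidated helper above: any runner with a (func, *args, **kwargs) call signature can reuse the context-propagating wrapper, so the three public helpers stay one-liners.

    from nova import utils

    gt = utils.spawn(sum, [1, 2, 3])              # greenthread, returns a GreenThread
    utils.spawn_n(print, 'fire and forget')       # greenthread, no return value
    result = utils.tpool_execute(sum, [1, 2, 3])  # native thread, returns 6
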
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 532ed1fa50..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -34,6 +34,7 @@ from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.virt import event as virtevent
+import nova.virt.node
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -1595,6 +1596,11 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
+
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 362bf89973..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -32,6 +32,7 @@ import fixtures
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
from nova.compute import power_state
@@ -48,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -160,8 +162,8 @@ class FakeDriver(driver.ComputeDriver):
self._host = host
# NOTE(gibi): this is unnecessary complex and fragile but this is
# how many current functional sample tests expect the node name.
- self._nodes = (['fake-mini'] if self._host == 'compute'
- else [self._host])
+ self._set_nodes(['fake-mini'] if self._host == 'compute'
+ else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
@@ -504,6 +506,12 @@ class FakeDriver(driver.ComputeDriver):
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+ # NOTE(danms): Because the fake driver runs on the same host
+ # in tests, potentially with multiple nodes, we need to
+ # control our node uuids. Make sure we return a unique and
+ # consistent uuid for each node we are responsible for to
+ # avoid the persistent local node identity from taking over.
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -646,6 +654,10 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
+
def instance_on_disk(self, instance):
return False
@@ -764,7 +776,7 @@ class PredictableNodeUUIDDriver(SmallFakeDriver):
PredictableNodeUUIDDriver, self).get_available_resource(nodename)
# This is used in ComputeNode.update_from_virt_driver which is called
# from the ResourceTracker when creating a ComputeNode.
- resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename)
+ resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
return resources
@@ -1119,3 +1131,22 @@ class EphEncryptionDriverPLAIN(MediumFakeDriver):
FakeDriver.capabilities,
supports_ephemeral_encryption=True,
supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+ This behaves like a real virt driver from the perspective of its
+ nodes, with a stable nodename and use of the global node identity
+ stuff to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 271a719aa2..292536735a 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1213,10 +1213,13 @@ def _check_for_mem_encryption_requirement_conflicts(
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
+ # image_meta.name is not set if image object represents root
+ # Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {
'flavor_name': flavor.name,
'flavor_val': flavor_mem_enc_str,
- 'image_name': image_meta.name,
+ 'image_name': image_name,
'image_val': image_mem_enc,
}
raise exception.FlavorImageConflict(emsg % data)
@@ -1228,10 +1231,15 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
emsg = _(
"Memory encryption requested by %(requesters)s but image "
- "%(image_name)s doesn't have 'hw_firmware_type' property set to 'uefi'"
+ "%(image_name)s doesn't have 'hw_firmware_type' property set to "
+ "'uefi' or volume-backed instance was requested"
)
+ # image_meta.name is not set if the image object represents a root
+ # Cinder volume; in that case FlavorImageConflict should still be
+ # raised, but image_meta.name can't be extracted.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {'requesters': " and ".join(requesters),
- 'image_name': image_meta.name}
+ 'image_name': image_name}
raise exception.FlavorImageConflict(emsg % data)
@@ -1260,12 +1268,14 @@ def _check_mem_encryption_machine_type(image_meta, machine_type=None):
if mach_type is None:
return
+ # image_meta.name is not set if image object represents root Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
# Could be something like pc-q35-2.11 if a specific version of the
# machine type is required, so do substring matching.
if 'q35' not in mach_type:
raise exception.InvalidMachineType(
mtype=mach_type,
- image_id=image_meta.id, image_name=image_meta.name,
+ image_id=image_meta.id, image_name=image_name,
reason=_("q35 type is required for SEV to work"))
@@ -2295,6 +2305,7 @@ def _numa_cells_support_network_metadata(
def numa_fit_instance_to_host(
host_topology: 'objects.NUMATopology',
instance_topology: 'objects.InstanceNUMATopology',
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
limits: ty.Optional['objects.NUMATopologyLimit'] = None,
pci_requests: ty.Optional['objects.InstancePCIRequests'] = None,
pci_stats: ty.Optional[stats.PciDeviceStats] = None,
@@ -2310,6 +2321,12 @@ def numa_fit_instance_to_host(
:param host_topology: objects.NUMATopology object to fit an
instance on
:param instance_topology: objects.InstanceNUMATopology to be fitted
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resources
+ for that RequestGroup. If it is None then it signals that the
+ InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param limits: objects.NUMATopologyLimits that defines limits
:param pci_requests: instance pci_requests
:param pci_stats: pci_stats for the host
@@ -2465,7 +2482,7 @@ def numa_fit_instance_to_host(
continue
if pci_requests and pci_stats and not pci_stats.support_requests(
- pci_requests, chosen_instance_cells):
+ pci_requests, provider_mapping, chosen_instance_cells):
continue
if network_metadata and not _numa_cells_support_network_metadata(
@@ -2566,6 +2583,7 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
+ socket=host_cell.socket,
cpu_usage=0,
memory_usage=0,
mempages=host_cell.mempages,
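
As the test updates earlier in this patch show, callers of numa_fit_instance_to_host now pass the placement request-group mapping as the third positional argument, with an empty dict when no per-request mapping exists yet. A hedged call sketch; the variable names are placeholders:

    fitted = hardware.numa_fit_instance_to_host(
        host_topology,       # objects.NUMATopology of the host
        instance_topology,   # objects.InstanceNUMATopology being placed
        {},                  # provider_mapping; None once the scheduler allocated
        limits=limits,
        pci_requests=pci_requests,
        pci_stats=pci_stats,
    )
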
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 1291f975ad..ba18c85cf7 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -146,6 +146,14 @@ class HyperVDriver(driver.ComputeDriver):
'in Rocky.')
def init_host(self, host):
+ LOG.warning(
+ 'The hyperv driver is not tested by the OpenStack project nor '
+ 'does it have clear maintainer(s) and thus its quality cannot be '
+ 'ensured. It should be considered experimental and may be removed '
+ 'in a future release. If you are using the driver in production '
+ 'please let us know via the openstack-discuss mailing list.'
+ )
+
self._serialconsoleops.start_console_handlers()
event_handler = eventhandler.InstanceEventHandler(
state_change_callback=self.emit_event)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not len(types):
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
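
A short usage sketch of the new guard, mirroring the unit tests added earlier in this patch (the image id and create-type values are illustrative):

    from oslo_serialization import jsonutils
    from oslo_utils import imageutils

    info = imageutils.QemuImgInfo(jsonutils.dumps({
        'format': 'vmdk',
        'format-specific': {'type': 'vmdk',
                            'data': {'create-type': 'monolithicFlat'}},
    }), format='json')
    # Raises ImageUnacceptable unless 'monolithicFlat' is listed in
    # [compute]vmdk_allowed_types.
    check_vmdk_image('some-image-id', info)
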
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 7496db5a7c..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -397,6 +397,18 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
+
+ # It's possible this node has just moved from deleting
+ # to cleaning. Placement will update the inventory
+ # as all reserved, but this instance might have got here
+ # before that happened and after the previous allocation
+ # was deleted. We trigger a re-schedule to another node.
+ if (self._node_resources_used(node) or
+ self._node_resources_unavailable(node)):
+ msg = "Chosen ironic node %s is not available" % node_uuid
+ LOG.info(msg, instance=instance)
+ raise exception.ComputeResourcesUnavailable(reason=msg)
+
self._set_instance_id(node, instance)
def failed_spawn_cleanup(self, instance):
@@ -827,6 +839,13 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
@@ -874,15 +893,25 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# nodename is the ironic node's UUID.
node = self._node_from_cache(nodename)
+
reserved = False
- if (not self._node_resources_used(node) and
- self._node_resources_unavailable(node)):
- LOG.debug('Node %(node)s is not ready for a deployment, '
- 'reporting resources as reserved for it. Node\'s '
- 'provision state is %(prov)s, power state is '
- '%(power)s and maintenance is %(maint)s.',
- {'node': node.uuid, 'prov': node.provision_state,
- 'power': node.power_state, 'maint': node.maintenance})
+ if self._node_resources_unavailable(node):
+ # Operators might mark a node as in maintenance
+ # even when an instance is on the node; either
+ # way, let's mark this as reserved.
+ reserved = True
+
+ if (self._node_resources_used(node) and
+ not CONF.workarounds.skip_reserve_in_use_ironic_nodes):
+ # Mark resources as reserved once we have
+ # an instance here.
+ # When the allocation is deleted, automatic
+ # cleaning will most likely start, so we keep the
+ # node reserved until it becomes available again.
+ # In the case without automatic cleaning, once
+ # the allocation is removed in placement the node
+ # also stays reserved until we notice on
+ # the next periodic task that it is actually available.
reserved = True
info = self._node_resource(node)
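
Condensing the reworked reservation branches above into one expression (a readability restatement, not code from the patch):

    def node_is_reserved(unavailable, in_use, skip_in_use_workaround):
        # unavailable: maintenance or a provision state not ready for deploys
        # in_use: the node still has an instance/allocation on it
        return unavailable or (in_use and not skip_in_use_workaround)
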
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 0db2dc6b67..231283b8dd 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -2047,6 +2047,12 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
self.keymap = None
self.listen = None
+ self.image_compression = None
+ self.jpeg_compression = None
+ self.zlib_compression = None
+ self.playback_compression = None
+ self.streaming_mode = None
+
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
@@ -2057,6 +2063,24 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
if self.listen:
dev.set("listen", self.listen)
+ if self.type == "spice":
+ if self.image_compression is not None:
+ dev.append(etree.Element(
+ 'image', compression=self.image_compression))
+ if self.jpeg_compression is not None:
+ dev.append(etree.Element(
+ 'jpeg', compression=self.jpeg_compression))
+ if self.zlib_compression is not None:
+ dev.append(etree.Element(
+ 'zlib', compression=self.zlib_compression))
+ if self.playback_compression is not None:
+ dev.append(etree.Element(
+ 'playback', compression=self.get_on_off_str(
+ self.playback_compression)))
+ if self.streaming_mode is not None:
+ dev.append(etree.Element(
+ 'streaming', mode=self.streaming_mode))
+
return dev
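
With the new attributes populated, format_dom() should emit SPICE graphics XML along these lines (a sketch with illustrative values; playback_compression is converted through get_on_off_str):

    from nova.virt.libvirt import config as vconfig

    graphics = vconfig.LibvirtConfigGuestGraphics()
    graphics.type = "spice"
    graphics.image_compression = "auto_glz"
    graphics.jpeg_compression = "auto"
    graphics.zlib_compression = "auto"
    graphics.playback_compression = True
    graphics.streaming_mode = "filter"
    # Expected to yield roughly:
    #   <graphics type="spice">
    #     <image compression="auto_glz"/>
    #     <jpeg compression="auto"/>
    #     <zlib compression="auto"/>
    #     <playback compression="on"/>
    #     <streaming mode="filter"/>
    #   </graphics>
    print(graphics.to_xml())
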
diff --git a/nova/db/api/legacy_migrations/versions/__init__.py b/nova/virt/libvirt/cpu/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/db/api/legacy_migrations/versions/__init__.py
+++ b/nova/virt/libvirt/cpu/__init__.py
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..1c17458d6b
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,157 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova import objects
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+ It may be a physical CPU core or a hardware thread on a shared CPU core
+ depending on if the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ def __str__(self):
+ return str(self.ident)
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
+
+
+def power_up(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_up = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = True
+ else:
+ pcpu.set_high_governor()
+ powered_up.add(str(pcpu))
+ LOG.debug("Cores powered up : %s", powered_up)
+
+
+def power_down(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_down = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ powered_down.add(str(pcpu))
+ LOG.debug("Cores powered down : %s", powered_down)
+
+
+def power_down_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if (CONF.libvirt.cpu_power_management and
+ not CONF.compute.cpu_dedicated_set
+ ):
+ msg = _("'[compute]/cpu_dedicated_set' is mandatory to be set if "
+ "'[libvirt]/cpu_power_management' is set."
+ "Please provide the CPUs that can be pinned or don't use the "
+ "power management if you only use shared CPUs.")
+ raise exception.InvalidConfiguration(msg)
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ LOG.debug("Cores powered down : %s", cpu_dedicated_set)
+
+
+def validate_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ governors = set()
+ cpu_states = set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ # we need to collect the governors strategy and the CPU states
+ governors.add(pcpu.governor)
+ cpu_states.add(pcpu.online)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ # all the cores need to have the same governor strategy
+ if len(governors) > 1:
+ msg = _("All the cores need to have the same governor strategy"
+ "before modifying the CPU states. You can reboot the "
+ "compute node if you prefer.")
+ raise exception.InvalidConfiguration(msg)
+ elif CONF.libvirt.cpu_power_management_strategy == 'governor':
+ # all the cores need to be online
+ if False in cpu_states:
+ msg = _("All the cores need to be online before modifying the "
+ "governor strategy.")
+ raise exception.InvalidConfiguration(msg)
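
A hedged sketch of how the driver is expected to consume this module once [libvirt]cpu_power_management is enabled and [compute]cpu_dedicated_set is configured; the call sites suggested here are assumptions based on the driver hunk later in this patch:

    from nova.virt.libvirt.cpu import api as libvirt_cpu

    libvirt_cpu.validate_all_dedicated_cpus()    # check governors/online states agree
    libvirt_cpu.power_down_all_dedicated_cpus()  # park every dedicated core at startup
    # 'instance' is a placeholder for a nova.objects.Instance with pinned CPUs.
    libvirt_cpu.power_up(instance)               # wake only the cores pinned to it
    libvirt_cpu.power_down(instance)             # park them again when it stops
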
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+ raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
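As an aside, a rough usage sketch of the helpers above, assuming the standard sysfs layout referenced by ``CPU_PATH_TEMPLATE`` (core 4 is just an example, and the write helpers need elevated privileges)::

    from nova.virt.libvirt.cpu import core

    core.get_online(4)                 # reads .../cpu4/online, defaults to True
    core.set_offline(4)                # writes '0' to .../cpu4/online
    core.set_governor(4, 'powersave')  # writes .../cpu4/cpufreq/scaling_governor
    core.get_governor(4)               # -> 'powersave'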
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index abe4580707..fe48960296 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -114,6 +114,7 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt.cpu import api as libvirt_cpu
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
@@ -412,6 +413,8 @@ class LibvirtDriver(driver.ComputeDriver):
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
@@ -439,6 +442,10 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
"supports_socket_pci_numa_affinity": True,
+ "supports_ephemeral_encryption":
+ self.image_backend.backend().SUPPORTS_LUKS,
+ "supports_ephemeral_encryption_luks":
+ self.image_backend.backend().SUPPORTS_LUKS,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -463,7 +470,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
@@ -812,6 +818,18 @@ class LibvirtDriver(driver.ComputeDriver):
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
+ # NOTE(sbauza): We first verify whether the dedicated CPU power state or
+ # governors were previously modified by Nova. Note that this can raise an
+ # exception if the governor strategies differ between cores or if some
+ # cores are offline.
+ libvirt_cpu.validate_all_dedicated_cpus()
+ # NOTE(sbauza): We power down all dedicated CPUs here; if some existing
+ # instances are pinned to some of those CPUs, we will power them back up
+ # later when rebooting the instances in _init_instance().
+ # Note that this can raise an exception if the config options are
+ # incorrectly set.
+ libvirt_cpu.power_down_all_dedicated_cpus()
+
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
@@ -984,33 +1002,26 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
- cpu = vconfig.LibvirtConfigGuestCPU()
- for model in models:
- cpu.model = self._get_cpu_model_mapping(model)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured CPU model: %(model)s is not "
- "compatible with host CPU. Please correct your "
- "config and try again. %(e)s") % {
- 'model': model, 'e': e})
- raise exception.InvalidCPUInfo(msg)
-
- # Use guest CPU model to check the compatibility between guest CPU and
- # configured extra_flags
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = self._host.get_capabilities().host.cpu.model
- for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
- cpu_feature = self._prepare_cpu_flag(flag)
- cpu.add_feature(cpu_feature)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured extra flag: %(flag)s it not correct, or "
- "the host CPU does not support this flag. Please "
- "correct the config and try again. %(e)s") % {
- 'flag': flag, 'e': e})
- raise exception.InvalidCPUInfo(msg)
+ if not CONF.workarounds.skip_cpu_compare_at_startup:
+ # Use guest CPU model to check the compatibility between
+ # guest CPU and configured extra_flags
+ for model in models:
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.model = self._get_cpu_model_mapping(model)
+ for flag in set(x.lower() for
+ x in CONF.libvirt.cpu_model_extra_flags):
+ cpu_feature = self._prepare_cpu_flag(flag)
+ cpu.add_feature(cpu_feature)
+ try:
+ self._compare_cpu(cpu, self._get_cpu_info(), None)
+ except exception.InvalidCPUInfo as e:
+ msg = (_("Configured CPU model: %(model)s "
+ "and CPU Flags %(flags)s ar not "
+ "compatible with host CPU. Please correct your "
+ "config and try again. %(e)s") % {
+ 'model': model, 'e': e,
+ 'flags': CONF.libvirt.cpu_model_extra_flags})
+ raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
@@ -1514,6 +1525,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
+ # We're sure the instance is gone, so we can power down its pinned cores
+ libvirt_cpu.power_down(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, destroy_secrets=True):
@@ -3166,6 +3179,7 @@ class LibvirtDriver(driver.ComputeDriver):
current_power_state = guest.get_power_state(self._host)
+ libvirt_cpu.power_up(instance)
# TODO(stephenfin): Any reason we couldn't use 'self.resume' here?
guest.launch(pause=current_power_state == power_state.PAUSED)
@@ -7302,6 +7316,11 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.listen = CONF.spice.server_listen
+ graphics.image_compression = CONF.spice.image_compression
+ graphics.jpeg_compression = CONF.spice.jpeg_compression
+ graphics.zlib_compression = CONF.spice.zlib_compression
+ graphics.playback_compression = CONF.spice.playback_compression
+ graphics.streaming_mode = CONF.spice.streaming_mode
guest.add_device(graphics)
add_video_driver = True
@@ -7617,7 +7636,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance: 'objects.Instance',
power_on: bool = True,
pause: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
) -> libvirt_guest.Guest:
"""Create a Guest from XML.
@@ -7643,6 +7662,7 @@ class LibvirtDriver(driver.ComputeDriver):
post_xml_callback()
if power_on or pause:
+ libvirt_cpu.power_up(instance)
guest.launch(pause=pause)
return guest
@@ -7677,7 +7697,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info: ty.Optional[ty.Dict[str, ty.Any]],
power_on: bool = True,
vifs_already_plugged: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
external_events: ty.Optional[ty.List[ty.Tuple[str, str]]] = None,
cleanup_instance_dir: bool = False,
cleanup_instance_disks: bool = False,
@@ -7747,15 +7767,18 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.compute.cpu_dedicated_set:
return set()
- online_cpus = self._host.get_online_cpus()
+ if CONF.libvirt.cpu_power_management:
+ available_cpus = self._host.get_available_cpus()
+ else:
+ available_cpus = self._host.get_online_cpus()
dedicated_cpus = hardware.get_cpu_dedicated_set()
- if not dedicated_cpus.issubset(online_cpus):
+ if not dedicated_cpus.issubset(available_cpus):
msg = _("Invalid '[compute] cpu_dedicated_set' config: one or "
- "more of the configured CPUs is not online. Online "
- "cpuset(s): %(online)s, configured cpuset(s): %(req)s")
+ "more of the configured CPUs is not available. Available "
+ "cpuset(s): %(available)s, configured cpuset(s): %(req)s")
raise exception.Invalid(msg % {
- 'online': sorted(online_cpus),
+ 'available': sorted(available_cpus),
'req': sorted(dedicated_cpus)})
return dedicated_cpus
@@ -8366,6 +8389,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
+ LOG.debug('Searching for available mdevs...')
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
available_mdevs = set()
@@ -8381,6 +8405,7 @@ class LibvirtDriver(driver.ComputeDriver):
available_mdevs.add(mdev["uuid"])
available_mdevs -= set(allocated_mdevs)
+ LOG.info('Available mdevs: %s.', available_mdevs)
return available_mdevs
def _create_new_mediated_device(self, parent, uuid=None):
@@ -8392,6 +8417,7 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: the newly created mdev UUID or None if not possible
"""
+ LOG.debug('Attempting to create new mdev...')
supported_types = self.supported_vgpu_types
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(supported_types)
@@ -8403,6 +8429,7 @@ class LibvirtDriver(driver.ComputeDriver):
# The device is not the one that was called, not creating
# the mdev
continue
+ LOG.debug('Trying on: %s.', dev_name)
dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name)
if dev_supported_type and device['types'][
dev_supported_type]['availableInstances'] > 0:
@@ -8412,7 +8439,13 @@ class LibvirtDriver(driver.ComputeDriver):
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
chosen_mdev = nova.privsep.libvirt.create_mdev(
pci_addr, dev_supported_type, uuid=uuid)
+ LOG.info('Created mdev: %s on pGPU: %s.',
+ chosen_mdev, pci_addr)
return chosen_mdev
+ LOG.debug('Failed: No available instances on device.')
+ LOG.info('Failed to create mdev. '
+ 'No free space found among the following devices: %s.',
+ [dev['dev_id'] for dev in devices])
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
@@ -8495,6 +8528,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
+ LOG.debug('No available mdevs were found. '
+ 'Creating a new one...')
chosen_mdev = self._create_new_mediated_device(parent_device)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
@@ -8502,6 +8537,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason='mdev-capable resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
+ LOG.info('Allocated mdev: %s.', chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
@@ -9504,6 +9540,7 @@ class LibvirtDriver(driver.ComputeDriver):
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
+ data["uuid"] = self._host.get_node_uuid()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
@@ -9957,7 +9994,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
- ret = self._host.compare_cpu(cpu_xml)
+ ret = self._host.compare_hypervisor_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
@@ -11040,16 +11077,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.workarounds.enable_qemu_monitor_announce_self:
return
- LOG.info('Sending announce-self command to QEMU monitor',
- instance=instance)
+ current_attempt = 0
- try:
- guest = self._host.get_guest(instance)
- guest.announce_self()
- except Exception:
- LOG.warning('Failed to send announce-self command to QEMU monitor',
- instance=instance)
- LOG.exception()
+ max_attempts = (
+ CONF.workarounds.qemu_monitor_announce_self_count)
+ # qemu_monitor_announce_self_interval is specified in seconds
+ announce_pause = (
+ CONF.workarounds.qemu_monitor_announce_self_interval)
+
+ while current_attempt < max_attempts:
+ # Increment attempt
+ current_attempt += 1
+
+ # Only use announce_pause after the first attempt to avoid
+ # pausing before calling announce_self for the first attempt
+ if current_attempt != 1:
+ greenthread.sleep(announce_pause)
+
+ LOG.info('Sending announce-self command to QEMU monitor. '
+ 'Attempt %(current_attempt)s of %(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ try:
+ guest = self._host.get_guest(instance)
+ guest.announce_self()
+ except Exception:
+ LOG.warning('Failed to send announce-self command to '
+ 'QEMU monitor. Attempt %(current_attempt)s of '
+ '%(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ LOG.exception('Error sending announce-self to QEMU monitor',
+ instance=instance)
def post_live_migration_at_destination(self, context,
instance,
@@ -11299,6 +11357,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
+
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
diff --git a/nova/virt/libvirt/event.py b/nova/virt/libvirt/event.py
index a7d2a3624f..56951dc11c 100644
--- a/nova/virt/libvirt/event.py
+++ b/nova/virt/libvirt/event.py
@@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import typing as ty
+
from nova.virt import event
@@ -22,7 +24,10 @@ class LibvirtEvent(event.InstanceEvent):
class DeviceEvent(LibvirtEvent):
"""Base class for device related libvirt events"""
- def __init__(self, uuid: str, dev: str, timestamp: float = None):
+ def __init__(self,
+ uuid: str,
+ dev: str,
+ timestamp: ty.Optional[float] = None):
super().__init__(uuid, timestamp)
self.dev = dev
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 46435a9a7f..1ae86d9f47 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -66,6 +66,7 @@ from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
+import nova.virt.node # noqa
if ty.TYPE_CHECKING:
import libvirt
@@ -138,6 +139,7 @@ class Host(object):
self._caps = None
self._domain_caps = None
self._hostname = None
+ self._node_uuid = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
@@ -738,6 +740,14 @@ class Host(object):
return doms
+ def get_available_cpus(self):
+ """Get the set of CPUs that exist on the host.
+
+ :returns: set of CPUs, raises libvirtError on error
+ """
+ cpus, cpu_map, online = self.get_connection().getCPUMap()
+ return {cpu for cpu in range(cpus)}
+
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
@@ -1059,6 +1069,12 @@ class Host(object):
{'old': self._hostname, 'new': hostname})
return self._hostname
+ def get_node_uuid(self):
+ """Returns the UUID of this node."""
+ if not self._node_uuid:
+ self._node_uuid = nova.virt.node.get_local_node_uuid()
+ return self._node_uuid
+
def find_secret(self, usage_type, usage_id):
"""Find a secret.
@@ -1605,21 +1621,66 @@ class Host(object):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
+ def compare_hypervisor_cpu(self, xmlDesc, flags=0):
+ """Compares the given CPU description with the CPU provided by
+ the host hypervisor. This is different from the older method,
+ compare_cpu(), which compares a given CPU definition with the
+ host CPU without considering the abilities of the host
+ hypervisor. Except for @xmlDesc, all the other parameters to the
+ compareHypervisorCPU API are optional (libvirt will choose
+ sensible defaults).
+ """
+ emulator = None
+ arch = None
+ machine = None
+ virttype = None
+ return self.get_connection().compareHypervisorCPU(
+ emulator, arch, machine, virttype, xmlDesc, flags)
+
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
+ return self._has_cgroupsv1_cpu_controller() or \
+ self._has_cgroupsv2_cpu_controller()
+
+ def _has_cgroupsv1_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V1...")
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
+ return True
+ LOG.debug("CPU controller missing on host.")
+ return False
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V1. "
+ "Deemed host to be missing controller by this approach.")
+ return False
+
+ def _has_cgroupsv2_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V2...")
+ try:
+ with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
+ for line in fd.readlines():
+ bits = line.split()
+ if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
return True
+ LOG.debug("CPU controller missing on host.")
return False
- except IOError:
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V2. "
+ "Deemed host to be missing controller by this approach.")
return False
def get_canonical_machine_type(self, arch, machine) -> str:
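A small illustration (not Nova code) of why ``get_available_cpus()`` was added next to ``get_online_cpus()``: with ``[libvirt]cpu_power_management`` enabled, Nova itself offlines dedicated cores, so the startup check in the driver hunk earlier in this diff must validate ``cpu_dedicated_set`` against the CPUs that exist rather than the CPUs currently online. Assuming libvirt's ``getCPUMap()`` reports four CPUs with cores 2-3 powered down::

    # (cpus, cpu_map, online) as returned by conn.getCPUMap()
    cpus, cpu_map, online = 4, [True, True, False, False], 2

    online_cpus = {i for i, up in enumerate(cpu_map) if up}  # {0, 1}
    available_cpus = set(range(cpus))                        # {0, 1, 2, 3}

    # cpu_dedicated_set = "2,3" is a subset of available_cpus but not of
    # online_cpus, so startup validation only passes via the available set.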
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 534cc60759..0a64ef43dd 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -81,6 +81,7 @@ def _update_utime_ignore_eacces(path):
class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
+ SUPPORTS_LUKS = False
def __init__(
self,
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 0675e4ac14..e1298ee5c8 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,6 +22,7 @@ import grp
import os
import pwd
import re
+import tempfile
import typing as ty
import uuid
@@ -114,6 +115,7 @@ def create_image(
disk_format: str,
disk_size: ty.Optional[ty.Union[str, int]],
backing_file: ty.Optional[str] = None,
+ encryption: ty.Optional[ty.Dict[str, ty.Any]] = None
) -> None:
"""Disk image creation with qemu-img
:param path: Desired location of the disk image
@@ -125,15 +127,16 @@ def create_image(
If no suffix is given, it will be interpreted as bytes.
Can be None in the case of a COW image.
:param backing_file: (Optional) Backing file to use.
+ :param encryption: (Optional) Dict detailing various encryption attributes
+ such as the format and passphrase.
"""
- base_cmd = [
+ cmd = [
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
]
- cow_opts = []
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += [
+ cow_opts = [
f'backing_file={backing_file}',
f'backing_fmt={base_details.file_format}'
]
@@ -147,12 +150,60 @@ def create_image(
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
- cow_opts = ['-o', csv_opts]
-
- cmd = base_cmd + cow_opts + [path]
- if disk_size is not None:
- cmd += [str(disk_size)]
- processutils.execute(*cmd)
+ cmd += ['-o', csv_opts]
+
+ # Disk size can be None in the case of a COW image
+ disk_size_arg = [str(disk_size)] if disk_size is not None else []
+
+ if encryption:
+ with tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8') as f:
+ # Write out the passphrase secret to a temp file
+ f.write(encryption.get('secret'))
+
+ # Ensure the secret is written to disk, we can't .close() here as
+ # that removes the file when using NamedTemporaryFile
+ f.flush()
+
+ # The basic options include the secret and encryption format
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={f.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+ # Supported luks options:
+ # cipher-alg=<str> - Name of cipher algorithm and key length
+ # cipher-mode=<str> - Name of encryption cipher mode
+ # hash-alg=<str> - Name of hash algorithm to use for PBKDF
+ # iter-time=<num> - Time to spend in PBKDF in milliseconds
+ # ivgen-alg=<str> - Name of IV generator algorithm
+ # ivgen-hash-alg=<str> - Name of IV generator hash algorithm
+ #
+ # NOTE(melwitt): Sensible defaults (that match the qemu defaults)
+ # are hardcoded at this time for simplicity and consistency when
+ # instances are migrated. Configuration of luks options could be
+ # added in a future release.
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ # We need to execute the command while the NamedTemporaryFile still
+ # exists
+ cmd += encryption_opts + [path] + disk_size_arg
+ processutils.execute(*cmd)
+ else:
+ cmd += [path] + disk_size_arg
+ processutils.execute(*cmd)
def create_ploop_image(
@@ -210,8 +261,8 @@ def copy_image(
dest: str,
host: ty.Optional[str] = None,
receive: bool = False,
- on_execute: ty.Callable = None,
- on_completion: ty.Callable = None,
+ on_execute: ty.Optional[ty.Callable] = None,
+ on_completion: ty.Optional[ty.Callable] = None,
compression: bool = True,
) -> None:
"""Copy a disk image to an existing directory
@@ -588,7 +639,7 @@ def mdev_name2uuid(mdev_name: str) -> str:
return str(uuid.UUID(mdev_uuid))
-def mdev_uuid2name(mdev_uuid: str, parent: str = None) -> str:
+def mdev_uuid2name(mdev_uuid: str, parent: ty.Optional[str] = None) -> str:
"""Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
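A hedged usage sketch of the updated ``create_image()`` helper; the path and passphrase below are placeholders, and the qemu-img command in the comment is only approximate::

    from nova.virt.libvirt import utils as libvirt_utils

    libvirt_utils.create_image(
        path='/var/lib/nova/instances/example/disk.eph0',  # placeholder path
        disk_format='qcow2',
        disk_size='1G',
        encryption={'format': 'luks', 'secret': 'passphrase-from-key-manager'},
    )
    # Roughly equivalent to:
    #   qemu-img create -f qcow2 \
    #     --object secret,id=sec,file=<tmpfile-with-passphrase> \
    #     -o encrypt.key-secret=sec -o encrypt.format=luks \
    #     -o encrypt.cipher-alg=aes-256 ... <path> 1G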
diff --git a/nova/virt/node.py b/nova/virt/node.py
new file mode 100644
index 0000000000..4cb3d0a573
--- /dev/null
+++ b/nova/virt/node.py
@@ -0,0 +1,108 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import uuid
+
+from oslo_utils import uuidutils
+
+import nova.conf
+from nova import exception
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+COMPUTE_ID_FILE = 'compute_id'
+LOCAL_NODE_UUID = None
+
+
+def write_local_node_uuid(node_uuid):
+ # We only ever write an identity file in the CONF.state_path
+ # location
+ fn = os.path.join(CONF.state_path, COMPUTE_ID_FILE)
+
+ # Try to create the identity file and write our uuid into it. Fail
+ # if the file exists (since it shouldn't if we made it here).
+ try:
+ with open(fn, 'x') as f:
+ f.write(node_uuid)
+ except FileExistsError:
+ # If the file exists, we must either fail or re-survey all the
+ # potential files. If we just read and return it, it could be
+ # inconsistent with files in the other locations.
+ raise exception.InvalidNodeConfiguration(
+ reason='Identity file %s appeared unexpectedly' % fn)
+ except Exception as e:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to write uuid to %s: %s' % (fn, e))
+
+ LOG.info('Wrote node identity %s to %s', node_uuid, fn)
+
+
+def read_local_node_uuid():
+ locations = ([os.path.dirname(f) for f in CONF.config_file] +
+ [CONF.state_path])
+
+ uuids = []
+ found = []
+ for location in locations:
+ fn = os.path.join(location, COMPUTE_ID_FILE)
+ try:
+ # UUIDs should be 36 characters in canonical format. Read
+ # a little more to be graceful about whitespace in/around
+ # the actual value we want to read. However, it must parse
+ # to a legit UUID once we strip the whitespace.
+ with open(fn) as f:
+ content = f.read(40)
+ node_uuid = str(uuid.UUID(content.strip()))
+ except FileNotFoundError:
+ continue
+ except ValueError:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to parse UUID from %s' % fn)
+ uuids.append(node_uuid)
+ found.append(fn)
+
+ if uuids:
+ # Any identities we found must be consistent, or we fail
+ first = uuids[0]
+ for i, (node_uuid, fn) in enumerate(zip(uuids, found)):
+ if node_uuid != first:
+ raise exception.InvalidNodeConfiguration(
+ reason='UUID %s in %s does not match %s' % (
+ node_uuid, fn, uuids[i - 1]))
+ LOG.info('Determined node identity %s from %s', first, found[0])
+ return first
+ else:
+ return None
+
+
+def get_local_node_uuid():
+ """Read or create local node uuid file.
+
+ :returns: UUID string read from file, or generated
+ """
+ global LOCAL_NODE_UUID
+
+ if LOCAL_NODE_UUID is not None:
+ return LOCAL_NODE_UUID
+
+ node_uuid = read_local_node_uuid()
+ if not node_uuid:
+ node_uuid = uuidutils.generate_uuid()
+ LOG.info('Generated node identity %s', node_uuid)
+ write_local_node_uuid(node_uuid)
+
+ LOCAL_NODE_UUID = node_uuid
+ return node_uuid
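A short sketch of how the new module behaves on a typical compute host, assuming the usual ``/etc/nova/nova.conf`` and a ``state_path`` of ``/var/lib/nova`` (both are deployment-dependent)::

    from nova.virt import node

    node_uuid = node.get_local_node_uuid()
    # * If /etc/nova/compute_id or /var/lib/nova/compute_id already exists,
    #   the UUID found there is returned (all copies must agree, otherwise
    #   InvalidNodeConfiguration is raised).
    # * Otherwise a new UUID is generated, written to
    #   /var/lib/nova/compute_id, and returned on every later call via the
    #   module-level LOCAL_NODE_UUID cache.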
diff --git a/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
new file mode 100644
index 0000000000..b370889171
--- /dev/null
+++ b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ The following SPICE-related options are added to the ``spice``
+ configuration group of a Nova configuration:
+
+ - ``image_compression``
+ - ``jpeg_compression``
+ - ``zlib_compression``
+ - ``playback_compression``
+ - ``streaming_mode``
+
+ These configuration options can be used to enable and set the
+ SPICE compression settings for libvirt (QEMU/KVM) provisioned
+ instances. Each configuration option is optional and can be set
+ explicitly to configure the associated SPICE compression setting
+ for libvirt. If none of the configuration options are set, then none
+ of the SPICE compression settings will be configured for libvirt,
+ which corresponds to the behavior before this change. In this case,
+ the built-in defaults from the libvirt backend (e.g. QEMU) are used.
+
+ Note that these options are only taken into account if SPICE support
+ is enabled (and VNC support is disabled).
diff --git a/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
new file mode 100644
index 0000000000..6c5bc98046
--- /dev/null
+++ b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Starting with microversion v2.95, any evacuated instance will be
+ stopped at the destination host. The minimum required version for
+ Nova computes is 27.0.0 (antelope 2023.1). Operators can continue
+ using the previous behavior by selecting a microversion below v2.95.
+upgrade:
+ - |
+ Operators will have to consider upgrading compute hosts to Nova
+ 27.0.0 (antelope 2023.1) in order to take advantage of the new
+ (microversion v2.95) evacuate API behavior. An exception will be
+ raised for older versions.
diff --git a/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
new file mode 100644
index 0000000000..66890684af
--- /dev/null
+++ b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
@@ -0,0 +1,51 @@
+---
+prelude: |
+ The OpenStack 2023.1 (Nova 27.0.0) release includes many new features and
+ bug fixes. Please be sure to read the upgrade section which describes the
+ required actions to upgrade your cloud from 26.0.0 (Zed) to 27.0.0 (2023.1).
+ As a reminder, OpenStack 2023.1 is our first `Skip-Level-Upgrade Release`__
+ (from now on, we call it a `SLURP release`), where you can perform rolling
+ upgrades of your compute services from OpenStack Yoga as an experimental
+ feature. The next SLURP release will be 2024.1.
+
+ .. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for 2023.1 is `v2.95`__.
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-2023.1
+
+ - `PCI devices can now be scheduled <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ by Nova using the Placement API on an opt-in basis. This will help the
+ nova-scheduler service to better schedule flavors that use PCI
+ (non-Neutron related) resources, will generate fewer reschedules if an
+ instance cannot be created on a candidate, and will help the nova-scheduler
+ avoid missing valid candidates if the list is too large.
+
+ - Operators can now ask Nova to `manage the power consumption of dedicated
+ CPUs <https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#configuring-cpu-power-management-for-dedicated-cores>`_
+ so as to either offline them or change their governor if they're
+ currently not in use by any instance or if the instance is stopped.
+
+ - Nova will prevent unexpected compute service renames by `persisting a unique
+ compute UUID on local disk <https://docs.openstack.org/nova/latest/admin/compute-node-identification.html>`_.
+ This stored UUID will be considered the source of truth for knowing whether
+ the compute service hostname has been modified or not. As a reminder,
+ changing a compute hostname is forbidden, particularly when the compute is
+ currently hosting running instances.
+
+ - `SPICE consoles <https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console>`_
+ can now be configured with compression settings which include choices of the
+ compression algorithm and the compression mode.
+
+ - Fully-Qualified Domain Names are now considered valid for an instance
+ hostname if you use the 2.94 API microversion.
+
+ - By opting into 2.95 API microversion, evacuated instances will remain
+ stopped on the destination host until manually started.
+
+ - Nova APIs now `by default support new RBAC policies <https://docs.openstack.org/nova/latest/configuration/policy.html>`_
+ and scopes. See our `Policy Concepts documentation <https://docs.openstack.org/nova/latest/configuration/policy-concepts.html>`_
+ for further details.
diff --git a/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
new file mode 100644
index 0000000000..95422fce67
--- /dev/null
+++ b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ It is now possible to configure nova-compute services that use the libvirt
+ driver by setting ``[libvirt]cpu_power_management`` to ``True`` in order to
+ let the service power physical CPUs down or up depending on whether those
+ CPUs are pinned to instances. In order to support this feature, the
+ compute service needs to be configured with ``[compute]cpu_dedicated_set``.
+ If so, all the related CPUs will be powered down until they are used by an
+ instance, and the related pinned CPUs will be powered up just before
+ starting the guest. If ``[compute]cpu_dedicated_set`` isn't set, then the
+ compute service will refuse to start.
+ By default the power strategy will offline CPUs when powering down and
+ online the CPUs when powering up, but another strategy is possible by using
+ ``[libvirt]cpu_power_management_strategy=governor``, which will instead
+ modify the related CPU governor using the ``[libvirt]cpu_power_governor_low``
+ and ``[libvirt]cpu_power_governor_high`` configuration values (respective
+ defaults being ``powersave`` and ``performance``).
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
new file mode 100644
index 0000000000..7a9e53ed26
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Since 26.0.0 (Zed) Nova supports tracking PCI devices in Placement. Now
+ Nova also supports scheduling flavor-based PCI device requests via
+ Placement. This support is disabled by default. Please read the
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
diff --git a/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
new file mode 100644
index 0000000000..0941dd7450
--- /dev/null
+++ b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
@@ -0,0 +1,28 @@
+---
+fixes:
+ - |
+ Fixes `bug 1996995`_ in which VMs live migrated on certain VXLAN Arista
+ network fabrics were inaccessible until the switch arp cache expired.
+
+ A Nova workaround option of ``enable_qemu_monitor_announce_self`` was added
+ to fix `bug 1815989`_ which when enabled would interact with the QEMU
+ monitor and force a VM to announce itself.
+
+ On certain network fabrics, VMs that are live migrated remain inaccessible
+ via the network despite the QEMU monitor announce_self command successfully
+ being called.
+
+ It was noted that on Arista VXLAN fabrics, testing showed that it required
+ several attempts of running the QEMU announce_self monitor command before
+ the switch would acknowledge a VM's new location on the fabric.
+
+ This fix introduces two operator-configurable options.
+ The first option, ``[workarounds]qemu_monitor_announce_self_count``, sets
+ the number of times the QEMU monitor announce_self command is called.
+
+ The second option, ``[workarounds]qemu_monitor_announce_self_interval``,
+ sets the delay in seconds between subsequent announce_self commands.
+
+ .. _`bug 1996995`: https://bugs.launchpad.net/nova/+bug/1996995
+ .. _`bug 1815989`: https://bugs.launchpad.net/nova/+bug/1815989
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
new file mode 100644
index 0000000000..72a6f861b6
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The Nova service now enables the new API policy (RBAC) defaults and scope
+ checks by default. The default value of the config options
+ ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults``
+ has been changed to ``True``.
+
+ This means that if you are using a system-scoped token to access the Nova
+ API, the request will fail with a 403 error code. Also, the new defaults
+ will be enforced by default. To learn about the new defaults of each policy
+ rule, refer to the `Policy New Defaults`_. For more detail about the Nova
+ API policy changes, refer to `Policy Concepts`_.
+
+ If you want to disable them, modify the below config options in the
+ ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
new file mode 100644
index 0000000000..4fd2cc1ca9
--- /dev/null
+++ b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Fixed when placement returns ironic nodes that have just started automatic
+ cleaning as possible valid candidates. This is done by marking all ironic
+ nodes with an instance on them as reserved, such that nova only makes them
+ available once we have double checked Ironic reports the node as available.
+ If you don't have automatic cleaning on, this might mean it takes longer
+ than normal for Ironic nodes to become available for new instances.
+ If you want the old behaviour use the following workaround config:
+ `[workarounds]skip_reserve_in_use_ironic_nodes=true`
diff --git a/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml b/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml
new file mode 100644
index 0000000000..85b874fb69
--- /dev/null
+++ b/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The hyperv driver is marked as experimental and may be removed in a
+ future release. The driver is not tested by the OpenStack project and
+ does not have a clear maintainer.
diff --git a/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml b/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml
new file mode 100644
index 0000000000..31f2c70926
--- /dev/null
+++ b/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml
@@ -0,0 +1,20 @@
+---
+features:
+ - |
+ A new hypervisor version weigher has been added to prefer selecting hosts
+ with newer hypervisors installed. For the libvirt driver, this is the version
+ of libvirt on the compute node, not the version of QEMU. As with all
+ weighers this is enabled by default and its behavior can be modified using
+ the new ``hypervisor_version_weight_multiplier`` config option in the
+ ``filter_scheduler`` section.
+upgrade:
+ - |
+ A new hypervisor version weigher has been added that will prefer selecting
+ hosts with a newer hypervisor installed. This can help simplify rolling
+ upgrades by preferring the already upgraded hosts when moving workloads around
+ using live or cold migration. To restore the old behavior either remove
+ the weigher from the list of enabled weighers or set
+ ``[filter_scheduler] hypervisor_version_weight_multiplier=0``. The default
+ value of ``hypervisor_version_weight_multiplier`` is 1, so only a mild
+ preference is given to newer hosts; higher values will make the effect
+ more pronounced and negative values will prefer older hosts.
diff --git a/releasenotes/notes/microversion-2-94-59649401d5763286.yaml b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
new file mode 100644
index 0000000000..d0927e6f75
--- /dev/null
+++ b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - |
+ The 2.94 microversion has been added. This microversion extends
+ microversion 2.90 by allowing Fully Qualified Domain Names (FQDN) wherever
+ the ``hostname`` is able to be specified. This consists of creating an
+ instance (``POST /servers``), updating an instance
+ (``PUT /servers/{id}``), or rebuilding an instance
+ (``POST /servers/{server_id}/action (rebuild)``). When using an FQDN as the
+ instance hostname, the ``[api]dhcp_domain`` configuration option must be
+ set to the empty string in order for the correct FQDN to appear in the
+ ``hostname`` field in the metadata API.
+
+upgrade:
+ - |
+ In order to make use of microversion 2.94's FQDN hostnames, the
+ ``[api]dhcp_domain`` config option must be set to the empty string. If
+ this is not done, the ``hostname`` field in the metadata API will be
+ incorrect, as it will include the value of ``[api]dhcp_domain`` appended to
+ the instance's FQDN. Note that simply not setting ``[api]dhcp_domain`` is
+ not enough, as it has a default value of ``novalocal``. It must explicitly
+ be set to the empty string.
diff --git a/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml b/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml
new file mode 100644
index 0000000000..7e2dccbbf4
--- /dev/null
+++ b/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ [`bug 1983471 <https://bugs.launchpad.net/nova/+bug/1983471>`_]
+ When offloading a shelved instance, the compute will now remove the
+ binding so instance ports will appear as "unbound" in neutron.
diff --git a/releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml b/releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml
new file mode 100644
index 0000000000..c08080a806
--- /dev/null
+++ b/releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated
+ since Wallaby, have been removed. There should be no end-user impact.
diff --git a/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
new file mode 100644
index 0000000000..7e80059b80
--- /dev/null
+++ b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix rescuing a volume-based instance by adding a check for the
+ 'hw_rescue_disk' and 'hw_rescue_device' properties in image metadata
+ before attempting to rescue the instance.
diff --git a/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
new file mode 100644
index 0000000000..fdeb593bd2
--- /dev/null
+++ b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ The compute manager now uses a local file to provide node uuid persistence
+ to guard against problems with renamed services, among other things.
+ Deployers wishing to ensure that *new* compute services get a predictable
+ uuid before initial startup may provision that file and nova will use it,
+ otherwise nova will generate and write one to a `compute_id` file in
+ `CONF.state_path` the first time it starts up. Accidental renames of a
+ compute node's hostname will be detected and the manager will exit to avoid
+ database corruption. Note that none of this applies to Ironic computes, as
+ they manage nodes and uuids differently.
+upgrade:
+ - |
+ Existing compute nodes will, upon upgrade, persist the uuid of the compute
+ node assigned to their hostname at first startup. Since this must match
+ what is currently in the database, it is important to let nova provision
+ this file from its database. Nova will only persist to a `compute_id` file
+ in the `CONF.state_path` directory, which should already be writable.
diff --git a/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
new file mode 100644
index 0000000000..924e09a602
--- /dev/null
+++ b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Nova's use of libvirt's compareCPU() API has become error-prone as
+ it doesn't take into account the host hypervisor's capabilities. With
+ QEMU >= 2.9 and libvirt >= 4.4.0, libvirt will do the right thing in
+ terms of CPU comparison checks via a new replacement API,
+ compareHypervisorCPU(). Nova's minimum required versions of QEMU and
+ libvirt exceed these requirements by a good margin.
+
+ This change replaces the usage of older API, compareCPU(), with the
+ new one, compareHypervisorCPU().
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 0000000000..d1238479ba
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 6bff00e25a..ed6f8c2d07 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@ Nova Release Notes
:maxdepth: 1
unreleased
+ 2023.1
zed
yoga
xena
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index d90391af7c..c0bd8bc9a8 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -2,15 +2,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
# Andi Chandler <andi@gowling.com>, 2022. #zanata
+# Andi Chandler <andi@gowling.com>, 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-09-16 12:59+0000\n"
+"POT-Creation-Date: 2023-03-06 19:02+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-09-15 09:04+0000\n"
+"PO-Revision-Date: 2023-01-26 10:17+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -382,9 +383,6 @@ msgstr "20.5.0"
msgid "20.6.1"
msgstr "20.6.1"
-msgid "20.6.1-29"
-msgstr "20.6.1-29"
-
msgid "204 NoContent on success"
msgstr "204 NoContent on success"
@@ -409,9 +407,6 @@ msgstr "21.2.2"
msgid "21.2.3"
msgstr "21.2.3"
-msgid "21.2.4-12"
-msgstr "21.2.4-12"
-
msgid "22.0.0"
msgstr "22.0.0"
@@ -433,9 +428,6 @@ msgstr "22.3.0"
msgid "22.4.0"
msgstr "22.4.0"
-msgid "22.4.0-6"
-msgstr "22.4.0-6"
-
msgid "23.0.0"
msgstr "23.0.0"
@@ -451,8 +443,8 @@ msgstr "23.2.0"
msgid "23.2.1"
msgstr "23.2.1"
-msgid "23.2.1-13"
-msgstr "23.2.1-13"
+msgid "23.2.2"
+msgstr "23.2.2"
msgid "24.0.0"
msgstr "24.0.0"
@@ -463,8 +455,8 @@ msgstr "24.1.0"
msgid "24.1.1"
msgstr "24.1.1"
-msgid "24.1.1-7"
-msgstr "24.1.1-7"
+msgid "24.2.0"
+msgstr "24.2.0"
msgid "25.0.0"
msgstr "25.0.0"
@@ -472,8 +464,14 @@ msgstr "25.0.0"
msgid "25.0.1"
msgstr "25.0.1"
-msgid "25.0.1-5"
-msgstr "25.0.1-5"
+msgid "25.1.0"
+msgstr "25.1.0"
+
+msgid "26.0.0"
+msgstr "26.0.0"
+
+msgid "26.1.0"
+msgstr "26.1.0"
msgid "400 for unknown param for query param and for request body."
msgstr "400 for unknown param for query param and for request body."
@@ -488,6 +486,24 @@ msgid "409 Conflict if inventory in use or if some other request concurrently"
msgstr "409 Conflict if inventory in use or if some other request concurrently"
msgid ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+msgstr ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+
+msgid ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+msgstr ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+
+msgid ""
"A ``default_floating_pool`` configuration option has been added in the "
"``[neutron]`` group. The existing ``default_floating_pool`` option in the "
"``[DEFAULT]`` group is retained and should be used by nova-network users. "
@@ -571,6 +587,24 @@ msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
msgid ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+msgstr ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+
+msgid ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+msgstr ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+
+msgid ""
"The ``nova-manage vm list`` command is deprecated and will be removed in the "
"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
"instead."
@@ -580,6 +614,24 @@ msgstr ""
"instead."
msgid ""
+"The default flavors that nova has previously had are no longer created as "
+"part of the first database migration. New deployments will need to create "
+"appropriate flavors before first use."
+msgstr ""
+"The default flavours that Nova previously had are no longer created as part "
+"of the first database migration. New deployments will need to create "
+"appropriate flavours before first use."
+
+msgid ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+msgstr ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+
+msgid ""
"These commands only work with nova-network which is itself deprecated in "
"favor of Neutron."
msgstr ""
@@ -611,6 +663,9 @@ msgstr "Xena Series Release Notes"
msgid "Yoga Series Release Notes"
msgstr "Yoga Series Release Notes"
+msgid "Zed Series Release Notes"
+msgstr "Zed Series Release Notes"
+
msgid "kernels 3.x: 8"
msgstr "kernels 3.x: 8"
diff --git a/requirements.txt b/requirements.txt
index dc1c414aba..e885a4a66f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,7 +16,6 @@ greenlet>=0.4.15 # MIT
PasteDeploy>=1.5.0 # MIT
Paste>=2.0.2 # MIT
PrettyTable>=0.7.1 # BSD
-sqlalchemy-migrate>=0.13.0 # Apache-2.0
alembic>=1.5.0 # MIT
netaddr>=0.7.18 # BSD
netifaces>=0.10.4 # MIT
@@ -42,8 +41,8 @@ oslo.upgradecheck>=1.3.0
oslo.utils>=4.12.1 # Apache-2.0
oslo.db>=10.0.0 # Apache-2.0
oslo.rootwrap>=5.15.0 # Apache-2.0
-oslo.messaging>=10.3.0 # Apache-2.0
-oslo.policy>=3.7.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
+oslo.policy>=3.11.0 # Apache-2.0
oslo.privsep>=2.6.2 # Apache-2.0
oslo.i18n>=5.1.0 # Apache-2.0
oslo.service>=2.8.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 58594e229c..fa6f6af656 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,7 @@ classifiers =
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
diff --git a/tox.ini b/tox.ini
index edb08599e7..77c0d9b9d3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,13 +1,8 @@
[tox]
minversion = 3.18.0
-envlist = py39,functional,pep8
-# Automatic envs (pyXX) will only use the python version appropriate to that
-# env and ignore basepython inherited from [testenv] if we set
-# ignore_basepython_conflict.
-ignore_basepython_conflict = True
+envlist = py3,functional,pep8
[testenv]
-basepython = python3
usedevelop = True
allowlist_externals =
bash
@@ -15,6 +10,7 @@ allowlist_externals =
rm
env
make
+install_command = python -I -m pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
LANGUAGE=en_US
@@ -26,8 +22,6 @@ setenv =
# TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
SQLALCHEMY_WARN_20=1
deps =
- -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
extras =
zvm
@@ -48,7 +42,7 @@ commands =
env TEST_OSPROFILER=1 stestr run --combine --no-discover 'nova.tests.unit.test_profiler'
stestr slowest
-[testenv:functional{,-py38,-py39,-py310}]
+[testenv:functional{,-py38,-py39,-py310,-py311}]
description =
Run functional tests.
# As nova functional tests import the PlacementFixture from the placement
@@ -67,7 +61,7 @@ description =
# because we do not want placement present during unit tests.
deps =
{[testenv]deps}
- openstack-placement>=1.0.0
+ openstack-placement>=9.0.0.0b1
extras =
commands =
stestr --test-path=./nova/tests/functional run {posargs}
@@ -351,6 +345,7 @@ extension =
N369 = checks:check_lockutils_rwlocks
N370 = checks:check_six
N371 = checks:import_stock_mock
+ N372 = checks:check_set_daemon
paths =
./nova/hacking