-rw-r--r--.zuul.yaml104
-rw-r--r--HACKING.rst6
-rw-r--r--api-guide/source/accelerator-support.rst4
-rw-r--r--api-guide/source/server_concepts.rst2
-rw-r--r--api-guide/source/users.rst2
-rw-r--r--api-ref/source/flavors.inc9
-rw-r--r--api-ref/source/parameters.yaml25
-rw-r--r--api-ref/source/servers-actions.inc19
-rw-r--r--api-ref/source/servers.inc7
-rw-r--r--devstack/nova-multi-cell-exclude-list.txt4
-rw-r--r--doc/api_samples/images/images-details-get-resp.json198
-rw-r--r--doc/api_samples/images/images-list-get-resp.json128
-rw-r--r--doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json4
-rw-r--r--doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json5
-rw-r--r--doc/api_samples/servers/v2.94/server-action-rebuild-resp.json80
-rw-r--r--doc/api_samples/servers/v2.94/server-action-rebuild.json15
-rw-r--r--doc/api_samples/servers/v2.94/server-create-req.json30
-rw-r--r--doc/api_samples/servers/v2.94/server-create-resp.json22
-rw-r--r--doc/api_samples/servers/v2.94/server-get-resp.json81
-rw-r--r--doc/api_samples/servers/v2.94/server-update-req.json8
-rw-r--r--doc/api_samples/servers/v2.94/server-update-resp.json78
-rw-r--r--doc/api_samples/servers/v2.94/servers-details-resp.json88
-rw-r--r--doc/api_samples/servers/v2.94/servers-list-resp.json24
-rw-r--r--doc/api_samples/versions/v21-version-get-resp.json2
-rw-r--r--doc/api_samples/versions/versions-get-resp.json2
-rw-r--r--doc/ext/extra_specs.py2
-rw-r--r--doc/ext/feature_matrix.py6
-rw-r--r--doc/notification_samples/common_payloads/ImageMetaPropsPayload.json2
-rw-r--r--doc/source/admin/architecture.rst2
-rw-r--r--doc/source/admin/availability-zones.rst56
-rw-r--r--doc/source/admin/cells.rst2
-rw-r--r--doc/source/admin/compute-node-identification.rst83
-rw-r--r--doc/source/admin/configuration/hypervisor-hyper-v.rst2
-rw-r--r--doc/source/admin/cpu-topologies.rst95
-rw-r--r--doc/source/admin/evacuate.rst14
-rw-r--r--doc/source/admin/huge-pages.rst2
-rw-r--r--doc/source/admin/index.rst5
-rw-r--r--doc/source/admin/libvirt-misc.rst30
-rw-r--r--doc/source/admin/live-migration-usage.rst2
-rw-r--r--doc/source/admin/manage-logs.rst10
-rw-r--r--doc/source/admin/manage-volumes.rst6
-rw-r--r--doc/source/admin/managing-resource-providers.rst2
-rw-r--r--doc/source/admin/networking.rst4
-rw-r--r--doc/source/admin/pci-passthrough.rst211
-rw-r--r--doc/source/admin/remote-console-access.rst16
-rw-r--r--doc/source/admin/scheduling.rst12
-rw-r--r--doc/source/admin/secure-live-migration-with-qemu-native-tls.rst4
-rw-r--r--doc/source/admin/soft-delete-shadow-tables.rst62
-rw-r--r--doc/source/admin/vdpa.rst86
-rw-r--r--doc/source/cli/nova-compute.rst2
-rw-r--r--doc/source/cli/nova-manage.rst10
-rw-r--r--doc/source/cli/nova-rootwrap.rst2
-rw-r--r--doc/source/cli/nova-status.rst2
-rw-r--r--doc/source/configuration/index.rst4
-rw-r--r--doc/source/configuration/policy-concepts.rst310
-rw-r--r--doc/source/contributor/api-ref-guideline.rst2
-rw-r--r--doc/source/contributor/development-environment.rst2
-rw-r--r--doc/source/contributor/how-to-get-involved.rst4
-rw-r--r--doc/source/contributor/index.rst12
-rw-r--r--doc/source/contributor/process.rst10
-rw-r--r--doc/source/contributor/ptl-guide.rst4
-rw-r--r--doc/source/index.rst12
-rw-r--r--doc/source/install/overview.rst2
-rw-r--r--doc/source/install/verify.rst4
-rw-r--r--doc/source/reference/attach-volume.rst2
-rw-r--r--doc/source/reference/block-device-structs.rst9
-rw-r--r--doc/source/reference/glossary.rst2
-rw-r--r--doc/source/reference/index.rst8
-rw-r--r--doc/source/reference/libvirt-distro-support-matrix.rst2
-rw-r--r--doc/source/reference/stable-api.rst2
-rw-r--r--doc/source/user/block-device-mapping.rst2
-rw-r--r--doc/source/user/certificate-validation.rst4
-rw-r--r--doc/source/user/metadata.rst11
-rw-r--r--doc/source/user/support-matrix.ini20
-rw-r--r--doc/source/user/wsgi.rst14
-rw-r--r--etc/nova/nova-config-generator.conf1
-rw-r--r--mypy-files.txt5
-rw-r--r--nova/api/openstack/api_version_request.py7
-rw-r--r--nova/api/openstack/compute/evacuate.py25
-rw-r--r--nova/api/openstack/compute/flavor_access.py9
-rw-r--r--nova/api/openstack/compute/remote_consoles.py3
-rw-r--r--nova/api/openstack/compute/rest_api_version_history.rst30
-rw-r--r--nova/api/openstack/compute/schemas/evacuate.py4
-rw-r--r--nova/api/openstack/compute/schemas/server_external_events.py4
-rw-r--r--nova/api/openstack/compute/schemas/servers.py14
-rw-r--r--nova/api/openstack/compute/server_external_events.py3
-rw-r--r--nova/api/openstack/compute/servers.py14
-rw-r--r--nova/api/openstack/compute/services.py7
-rw-r--r--nova/api/openstack/identity.py22
-rw-r--r--nova/api/openstack/wsgi_app.py5
-rw-r--r--nova/api/validation/extra_specs/hw.py57
-rw-r--r--nova/block_device.py4
-rw-r--r--nova/cmd/manage.py18
-rw-r--r--nova/cmd/status.py67
-rw-r--r--nova/compute/api.py197
-rw-r--r--nova/compute/claims.py25
-rw-r--r--nova/compute/manager.py508
-rw-r--r--nova/compute/pci_placement_translator.py623
-rw-r--r--nova/compute/resource_tracker.py208
-rw-r--r--nova/compute/rpcapi.py29
-rw-r--r--nova/compute/utils.py27
-rw-r--r--nova/compute/vm_states.py3
-rw-r--r--nova/conductor/api.py7
-rw-r--r--nova/conductor/manager.py55
-rw-r--r--nova/conductor/rpcapi.py24
-rw-r--r--nova/conductor/tasks/live_migrate.py2
-rw-r--r--nova/conductor/tasks/migrate.py9
-rw-r--r--nova/conf/api.py7
-rw-r--r--nova/conf/compute.py53
-rw-r--r--nova/conf/ironic.py1
-rw-r--r--nova/conf/libvirt.py19
-rw-r--r--nova/conf/mks.py2
-rw-r--r--nova/conf/pci.py164
-rw-r--r--nova/conf/scheduler.py21
-rw-r--r--nova/conf/spice.py53
-rw-r--r--nova/conf/vmware.py5
-rw-r--r--nova/conf/workarounds.py44
-rw-r--r--nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py35
-rw-r--r--nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py59
-rw-r--r--nova/db/main/models.py14
-rw-r--r--nova/exception.py98
-rw-r--r--nova/filesystem.py59
-rw-r--r--nova/hacking/checks.py21
-rw-r--r--nova/limit/placement.py6
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova.po8
-rw-r--r--nova/locale/de/LC_MESSAGES/nova.po10
-rw-r--r--nova/locale/es/LC_MESSAGES/nova.po9
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova.po10
-rw-r--r--nova/locale/it/LC_MESSAGES/nova.po10
-rw-r--r--nova/locale/ja/LC_MESSAGES/nova.po8
-rw-r--r--nova/locale/ko_KR/LC_MESSAGES/nova.po8
-rw-r--r--nova/locale/pt_BR/LC_MESSAGES/nova.po10
-rw-r--r--nova/locale/ru/LC_MESSAGES/nova.po9
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova.po10
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova.po14
-rw-r--r--nova/locale/zh_TW/LC_MESSAGES/nova.po8
-rw-r--r--nova/manager.py7
-rw-r--r--nova/network/model.py16
-rw-r--r--nova/network/neutron.py45
-rw-r--r--nova/notifications/objects/image.py6
-rw-r--r--nova/objects/block_device.py52
-rw-r--r--nova/objects/compute_node.py15
-rw-r--r--nova/objects/external_event.py6
-rw-r--r--nova/objects/fields.py34
-rw-r--r--nova/objects/image_meta.py26
-rw-r--r--nova/objects/instance.py41
-rw-r--r--nova/objects/instance_info_cache.py4
-rw-r--r--nova/objects/migrate_data.py2
-rw-r--r--nova/objects/migration.py4
-rw-r--r--nova/objects/request_spec.py113
-rw-r--r--nova/objects/service.py24
-rw-r--r--nova/pci/devspec.py33
-rw-r--r--nova/pci/manager.py29
-rw-r--r--nova/pci/request.py13
-rw-r--r--nova/pci/stats.py308
-rw-r--r--nova/pci/whitelist.py8
-rw-r--r--nova/policies/admin_actions.py4
-rw-r--r--nova/policies/admin_password.py2
-rw-r--r--nova/policies/aggregates.py18
-rw-r--r--nova/policies/assisted_volume_snapshots.py4
-rw-r--r--nova/policies/attach_interfaces.py8
-rw-r--r--nova/policies/availability_zone.py4
-rw-r--r--nova/policies/baremetal_nodes.py4
-rw-r--r--nova/policies/base.py44
-rw-r--r--nova/policies/console_auth_tokens.py2
-rw-r--r--nova/policies/console_output.py2
-rw-r--r--nova/policies/create_backup.py2
-rw-r--r--nova/policies/deferred_delete.py4
-rw-r--r--nova/policies/evacuate.py2
-rw-r--r--nova/policies/extended_server_attributes.py2
-rw-r--r--nova/policies/extensions.py2
-rw-r--r--nova/policies/flavor_access.py6
-rw-r--r--nova/policies/flavor_extra_specs.py10
-rw-r--r--nova/policies/flavor_manage.py6
-rw-r--r--nova/policies/floating_ip_pools.py2
-rw-r--r--nova/policies/floating_ips.py12
-rw-r--r--nova/policies/hosts.py12
-rw-r--r--nova/policies/hypervisors.py14
-rw-r--r--nova/policies/instance_actions.py8
-rw-r--r--nova/policies/instance_usage_audit_log.py4
-rw-r--r--nova/policies/ips.py4
-rw-r--r--nova/policies/keypairs.py8
-rw-r--r--nova/policies/limits.py2
-rw-r--r--nova/policies/lock_server.py6
-rw-r--r--nova/policies/migrate_server.py4
-rw-r--r--nova/policies/migrations.py2
-rw-r--r--nova/policies/multinic.py4
-rw-r--r--nova/policies/networks.py4
-rw-r--r--nova/policies/pause_server.py4
-rw-r--r--nova/policies/quota_class_sets.py4
-rw-r--r--nova/policies/quota_sets.py16
-rw-r--r--nova/policies/remote_consoles.py2
-rw-r--r--nova/policies/rescue.py4
-rw-r--r--nova/policies/security_groups.py20
-rw-r--r--nova/policies/server_diagnostics.py2
-rw-r--r--nova/policies/server_external_events.py2
-rw-r--r--nova/policies/server_groups.py10
-rw-r--r--nova/policies/server_metadata.py12
-rw-r--r--nova/policies/server_password.py4
-rw-r--r--nova/policies/server_tags.py12
-rw-r--r--nova/policies/server_topology.py4
-rw-r--r--nova/policies/servers.py60
-rw-r--r--nova/policies/servers_migrations.py8
-rw-r--r--nova/policies/services.py6
-rw-r--r--nova/policies/shelve.py8
-rw-r--r--nova/policies/simple_tenant_usage.py4
-rw-r--r--nova/policies/suspend_server.py4
-rw-r--r--nova/policies/tenant_networks.py4
-rw-r--r--nova/policies/volumes.py20
-rw-r--r--nova/policies/volumes_attachments.py12
-rw-r--r--nova/policy.py12
-rw-r--r--nova/quota.py7
-rw-r--r--nova/rpc.py16
-rw-r--r--nova/scheduler/client/report.py70
-rw-r--r--nova/scheduler/filters/__init__.py44
-rw-r--r--nova/scheduler/filters/numa_topology_filter.py24
-rw-r--r--nova/scheduler/filters/pci_passthrough_filter.py23
-rw-r--r--nova/scheduler/host_manager.py34
-rw-r--r--nova/scheduler/manager.py107
-rw-r--r--nova/scheduler/request_filter.py41
-rw-r--r--nova/service.py4
-rw-r--r--nova/test.py21
-rw-r--r--nova/tests/fixtures/__init__.py2
-rw-r--r--nova/tests/fixtures/cinder.py30
-rw-r--r--nova/tests/fixtures/filesystem.py81
-rw-r--r--nova/tests/fixtures/glance.py31
-rw-r--r--nova/tests/fixtures/libvirt.py74
-rw-r--r--nova/tests/fixtures/libvirt_imagebackend.py16
-rw-r--r--nova/tests/fixtures/nova.py174
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl118
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl76
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl80
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl15
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl21
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl22
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl81
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl8
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl78
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl88
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl24
-rw-r--r--nova/tests/functional/api_sample_tests/test_evacuate.py59
-rw-r--r--nova/tests/functional/api_sample_tests/test_images.py30
-rw-r--r--nova/tests/functional/api_sample_tests/test_remote_consoles.py20
-rw-r--r--nova/tests/functional/api_sample_tests/test_servers.py45
-rw-r--r--nova/tests/functional/compute/test_resource_tracker.py11
-rw-r--r--nova/tests/functional/integrated_helpers.py54
-rw-r--r--nova/tests/functional/libvirt/base.py116
-rw-r--r--nova/tests/functional/libvirt/test_device_bus_migration.py8
-rw-r--r--nova/tests/functional/libvirt/test_evacuate.py4
-rw-r--r--nova/tests/functional/libvirt/test_numa_live_migration.py12
-rw-r--r--nova/tests/functional/libvirt/test_numa_servers.py6
-rw-r--r--nova/tests/functional/libvirt/test_pci_in_placement.py1997
-rw-r--r--nova/tests/functional/libvirt/test_pci_sriov_servers.py1800
-rw-r--r--nova/tests/functional/libvirt/test_power_manage.py270
-rw-r--r--nova/tests/functional/libvirt/test_report_cpu_traits.py4
-rw-r--r--nova/tests/functional/libvirt/test_reshape.py21
-rw-r--r--nova/tests/functional/libvirt/test_vgpu.py35
-rw-r--r--nova/tests/functional/libvirt/test_vpmem.py6
-rw-r--r--nova/tests/functional/notification_sample_tests/test_compute_task.py7
-rw-r--r--nova/tests/functional/notification_sample_tests/test_instance.py24
-rw-r--r--nova/tests/functional/regressions/test_bug_1628606.py60
-rw-r--r--nova/tests/functional/regressions/test_bug_1669054.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1713783.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1732947.py4
-rw-r--r--nova/tests/functional/regressions/test_bug_1764883.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1823370.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1896463.py10
-rw-r--r--nova/tests/functional/regressions/test_bug_1902925.py5
-rw-r--r--nova/tests/functional/regressions/test_bug_1922053.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1928063.py4
-rw-r--r--nova/tests/functional/regressions/test_bug_1944619.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1951656.py73
-rw-r--r--nova/tests/functional/regressions/test_bug_1978983.py23
-rw-r--r--nova/tests/functional/regressions/test_bug_1980720.py68
-rw-r--r--nova/tests/functional/regressions/test_bug_1983753.py177
-rw-r--r--nova/tests/functional/test_aggregates.py24
-rw-r--r--nova/tests/functional/test_boot_from_volume.py40
-rw-r--r--nova/tests/functional/test_ephemeral_encryption.py381
-rw-r--r--nova/tests/functional/test_images.py8
-rw-r--r--nova/tests/functional/test_instance_actions.py9
-rw-r--r--nova/tests/functional/test_report_client.py52
-rw-r--r--nova/tests/functional/test_server_group.py77
-rw-r--r--nova/tests/functional/test_server_rescue.py86
-rw-r--r--nova/tests/functional/test_servers.py116
-rw-r--r--nova/tests/functional/test_servers_provider_tree.py4
-rw-r--r--nova/tests/functional/test_servers_resource_request.py26
-rw-r--r--nova/tests/functional/test_service.py85
-rw-r--r--nova/tests/unit/api/openstack/compute/test_create_backup.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_evacuate.py29
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_access.py25
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrate_server.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_quotas.py5
-rw-r--r--nova/tests/unit/api/openstack/compute/test_remote_consoles.py12
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_group_quotas.py7
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_groups.py97
-rw-r--r--nova/tests/unit/api/openstack/compute/test_servers.py30
-rw-r--r--nova/tests/unit/api/openstack/compute/test_volumes.py10
-rw-r--r--nova/tests/unit/api/openstack/fakes.py14
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi_app.py15
-rw-r--r--nova/tests/unit/api/validation/extra_specs/test_validators.py5
-rw-r--r--nova/tests/unit/cmd/test_policy.py17
-rw-r--r--nova/tests/unit/cmd/test_status.py55
-rw-r--r--nova/tests/unit/compute/test_api.py422
-rw-r--r--nova/tests/unit/compute/test_claims.py6
-rw-r--r--nova/tests/unit/compute/test_compute.py181
-rw-r--r--nova/tests/unit/compute/test_compute_mgr.py659
-rw-r--r--nova/tests/unit/compute/test_pci_placement_translator.py291
-rw-r--r--nova/tests/unit/compute/test_resource_tracker.py489
-rw-r--r--nova/tests/unit/compute/test_rpcapi.py83
-rw-r--r--nova/tests/unit/compute/test_shelve.py6
-rw-r--r--nova/tests/unit/compute/test_utils.py68
-rw-r--r--nova/tests/unit/compute/test_virtapi.py20
-rw-r--r--nova/tests/unit/conductor/tasks/test_live_migrate.py2
-rw-r--r--nova/tests/unit/conductor/test_conductor.py101
-rw-r--r--nova/tests/unit/console/test_websocketproxy.py63
-rw-r--r--nova/tests/unit/db/main/test_migrations.py42
-rw-r--r--nova/tests/unit/network/test_neutron.py210
-rw-r--r--nova/tests/unit/notifications/objects/test_notification.py2
-rw-r--r--nova/tests/unit/objects/test_block_device.py13
-rw-r--r--nova/tests/unit/objects/test_compute_node.py29
-rw-r--r--nova/tests/unit/objects/test_fields.py4
-rw-r--r--nova/tests/unit/objects/test_image_meta.py68
-rw-r--r--nova/tests/unit/objects/test_instance.py162
-rw-r--r--nova/tests/unit/objects/test_objects.py25
-rw-r--r--nova/tests/unit/objects/test_request_spec.py233
-rw-r--r--nova/tests/unit/pci/test_devspec.py48
-rw-r--r--nova/tests/unit/pci/test_manager.py42
-rw-r--r--nova/tests/unit/pci/test_request.py15
-rw-r--r--nova/tests/unit/pci/test_stats.py1063
-rw-r--r--nova/tests/unit/policies/base.py52
-rw-r--r--nova/tests/unit/policies/test_admin_actions.py13
-rw-r--r--nova/tests/unit/policies/test_admin_password.py14
-rw-r--r--nova/tests/unit/policies/test_aggregates.py27
-rw-r--r--nova/tests/unit/policies/test_attach_interfaces.py42
-rw-r--r--nova/tests/unit/policies/test_availability_zone.py15
-rw-r--r--nova/tests/unit/policies/test_baremetal_nodes.py13
-rw-r--r--nova/tests/unit/policies/test_console_output.py14
-rw-r--r--nova/tests/unit/policies/test_create_backup.py14
-rw-r--r--nova/tests/unit/policies/test_deferred_delete.py22
-rw-r--r--nova/tests/unit/policies/test_evacuate.py15
-rw-r--r--nova/tests/unit/policies/test_extensions.py10
-rw-r--r--nova/tests/unit/policies/test_flavor_access.py17
-rw-r--r--nova/tests/unit/policies/test_flavor_extra_specs.py31
-rw-r--r--nova/tests/unit/policies/test_flavor_manage.py5
-rw-r--r--nova/tests/unit/policies/test_floating_ip_pools.py10
-rw-r--r--nova/tests/unit/policies/test_floating_ips.py38
-rw-r--r--nova/tests/unit/policies/test_hosts.py17
-rw-r--r--nova/tests/unit/policies/test_hypervisors.py19
-rw-r--r--nova/tests/unit/policies/test_instance_actions.py35
-rw-r--r--nova/tests/unit/policies/test_instance_usage_audit_log.py3
-rw-r--r--nova/tests/unit/policies/test_keypairs.py14
-rw-r--r--nova/tests/unit/policies/test_limits.py4
-rw-r--r--nova/tests/unit/policies/test_lock_server.py19
-rw-r--r--nova/tests/unit/policies/test_migrate_server.py11
-rw-r--r--nova/tests/unit/policies/test_multinic.py22
-rw-r--r--nova/tests/unit/policies/test_networks.py8
-rw-r--r--nova/tests/unit/policies/test_pause_server.py14
-rw-r--r--nova/tests/unit/policies/test_quota_class_sets.py13
-rw-r--r--nova/tests/unit/policies/test_quota_sets.py35
-rw-r--r--nova/tests/unit/policies/test_remote_consoles.py14
-rw-r--r--nova/tests/unit/policies/test_rescue.py22
-rw-r--r--nova/tests/unit/policies/test_security_groups.py73
-rw-r--r--nova/tests/unit/policies/test_server_diagnostics.py11
-rw-r--r--nova/tests/unit/policies/test_server_groups.py33
-rw-r--r--nova/tests/unit/policies/test_server_ips.py21
-rw-r--r--nova/tests/unit/policies/test_server_metadata.py26
-rw-r--r--nova/tests/unit/policies/test_server_migrations.py11
-rw-r--r--nova/tests/unit/policies/test_server_password.py34
-rw-r--r--nova/tests/unit/policies/test_server_tags.py26
-rw-r--r--nova/tests/unit/policies/test_server_topology.py22
-rw-r--r--nova/tests/unit/policies/test_servers.py37
-rw-r--r--nova/tests/unit/policies/test_services.py13
-rw-r--r--nova/tests/unit/policies/test_shelve.py16
-rw-r--r--nova/tests/unit/policies/test_simple_tenant_usage.py19
-rw-r--r--nova/tests/unit/policies/test_suspend_server.py14
-rw-r--r--nova/tests/unit/policies/test_tenant_networks.py8
-rw-r--r--nova/tests/unit/policies/test_volumes.py74
-rw-r--r--nova/tests/unit/scheduler/client/test_report.py55
-rw-r--r--nova/tests/unit/scheduler/filters/test_numa_topology_filters.py97
-rw-r--r--nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py113
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py12
-rw-r--r--nova/tests/unit/scheduler/test_manager.py871
-rw-r--r--nova/tests/unit/scheduler/test_request_filter.py87
-rw-r--r--nova/tests/unit/test_filesystem.py52
-rw-r--r--nova/tests/unit/test_hacking.py21
-rw-r--r--nova/tests/unit/test_policy.py13
-rw-r--r--nova/tests/unit/test_rpc.py44
-rw-r--r--nova/tests/unit/test_service.py9
-rw-r--r--nova/tests/unit/virt/disk/test_api.py1
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py73
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/__init__.py0
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/test_api.py194
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/test_core.py122
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py100
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py126
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py1117
-rw-r--r--nova/tests/unit/virt/libvirt/test_guest.py36
-rw-r--r--nova/tests/unit/virt/libvirt/test_host.py27
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py36
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py111
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py34
-rw-r--r--nova/tests/unit/virt/test_block_device.py74
-rw-r--r--nova/tests/unit/virt/test_hardware.py241
-rw-r--r--nova/tests/unit/virt/test_images.py46
-rw-r--r--nova/tests/unit/virt/test_netutils.py23
-rw-r--r--nova/tests/unit/virt/test_node.py142
-rw-r--r--nova/tests/unit/virt/test_virt.py27
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py4
-rw-r--r--nova/virt/block_device.py89
-rw-r--r--nova/virt/driver.py58
-rw-r--r--nova/virt/fake.py61
-rw-r--r--nova/virt/hardware.py155
-rw-r--r--nova/virt/hyperv/serialproxy.py4
-rw-r--r--nova/virt/images.py31
-rw-r--r--nova/virt/ironic/driver.py68
-rw-r--r--nova/virt/libvirt/blockinfo.py85
-rw-r--r--nova/virt/libvirt/config.py149
-rw-r--r--nova/virt/libvirt/cpu/__init__.py22
-rw-r--r--nova/virt/libvirt/cpu/api.py157
-rw-r--r--nova/virt/libvirt/cpu/core.py78
-rw-r--r--nova/virt/libvirt/driver.py590
-rw-r--r--nova/virt/libvirt/guest.py26
-rw-r--r--nova/virt/libvirt/host.py36
-rw-r--r--nova/virt/libvirt/imagebackend.py99
-rw-r--r--nova/virt/libvirt/utils.py155
-rw-r--r--nova/virt/libvirt/volume/fibrechannel.py3
-rw-r--r--nova/virt/netutils.py9
-rw-r--r--nova/virt/node.py108
-rw-r--r--playbooks/ceph/glance-copy-policy.yaml15
-rw-r--r--playbooks/ceph/glance-setup.yaml39
-rw-r--r--releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml5
-rw-r--r--releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml23
-rw-r--r--releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml10
-rw-r--r--releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml13
-rw-r--r--releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml18
-rw-r--r--releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml9
-rw-r--r--releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml8
-rw-r--r--releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml9
-rw-r--r--releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml9
-rw-r--r--releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml11
-rw-r--r--releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml6
-rw-r--r--releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml8
-rw-r--r--releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml28
-rw-r--r--releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml12
-rw-r--r--releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml6
-rw-r--r--releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml23
-rw-r--r--releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml11
-rw-r--r--releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml21
-rw-r--r--releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml11
-rw-r--r--releasenotes/notes/microversion-2-94-59649401d5763286.yaml22
-rw-r--r--releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml14
-rw-r--r--releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml13
-rw-r--r--releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml36
-rw-r--r--releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml6
-rw-r--r--releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml19
-rw-r--r--releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml21
-rw-r--r--releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml21
-rw-r--r--releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml12
-rw-r--r--releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml (renamed from nova/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml)0
-rw-r--r--releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml11
-rw-r--r--releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml25
-rw-r--r--releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml46
-rw-r--r--releasenotes/source/index.rst1
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po694
-rw-r--r--releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po126
-rw-r--r--releasenotes/source/zed.rst6
-rw-r--r--requirements.txt10
-rw-r--r--setup.cfg1
-rwxr-xr-xtools/test-setup.sh8
-rw-r--r--tox.ini21
472 files changed, 22737 insertions, 3725 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index d4eb61e77d..29918cafc8 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -89,7 +89,7 @@
description: |
Run tempest live migration tests against local qcow2 ephemeral storage
and shared LVM/iSCSI cinder volumes.
- irrelevant-files: &nova-base-irrelevant-files
+ irrelevant-files:
- ^api-.*$
- ^(test-|)requirements.txt$
- ^.*\.rst$
@@ -100,6 +100,7 @@
- ^nova/policies/.*$
- ^nova/tests/.*$
- ^nova/test.py$
+ - ^nova/virt/ironic/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^tools/.*$
@@ -128,7 +129,21 @@
the "iptables_hybrid" securitygroup firewall driver, aka "hybrid plug".
The external events interactions between Nova and Neutron in these
situations have historically been fragile. This job exercises them.
- irrelevant-files: *nova-base-irrelevant-files
+ irrelevant-files: &nova-base-irrelevant-files
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/policies/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
vars:
tox_envlist: all
tempest_test_regex: (^tempest\..*compute\..*(migration|resize|reboot).*)
@@ -233,8 +248,9 @@
tox_envlist: all
# Only run compute API tests.
tempest_test_regex: ^tempest\.api\.compute
- # Skip slow tests.
- tempest_exclude_regex: .*\[.*\bslow\b.*\]
+ # Skip slow tests. Also, skip some volume detach tests until bug#1998148
+ # is fixed.
+ tempest_exclude_regex: (^tempest\.(api\.compute\.(volumes\.test_attach_volume\.AttachVolumeTestJSON\.test_attach_detach_volume|servers\.(test_server_rescue\.ServerStableDeviceRescueTest\.test_stable_device_rescue_disk_virtio_with_volume_attached|test_server_rescue_negative\.ServerRescueNegativeTestJSON\.test_rescued_vm_detach_volume)))|.*\[.*\bslow\b.*\])
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
@@ -579,8 +595,11 @@
required-projects:
- openstack/nova
pre-run:
- - playbooks/ceph/glance-copy-policy.yaml
+ - playbooks/ceph/glance-setup.yaml
vars:
+ # NOTE(danms): Increase our swap size since we're dealing with
+ # larger images that can trigger OOMs.
+ configure_swap_size: 4096
# NOTE(danms): These tests create an empty non-raw image, which nova
# will refuse because we set never_download_image_if_on_rbd in this job.
# Just skip these tests for this case.
@@ -588,6 +607,8 @@
GLANCE_STANDALONE: True
GLANCE_USE_IMPORT_WORKFLOW: True
DEVSTACK_PARALLEL: True
+ GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 2048
+ MYSQL_REDUCE_MEMORY: True
# NOTE(danms): This job is pretty heavy as it is, so we disable some
# services that are not relevant to the nova-glance-ceph scenario
# that this job is intended to validate.
@@ -598,6 +619,12 @@
s-object: false
s-proxy: false
devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ image-feature-enabled:
+ manage_locations: true
+ volume:
+ volume_size: 1
post-config:
$NOVA_CONF:
libvirt:
@@ -606,7 +633,7 @@
never_download_image_if_on_rbd: True
$GLANCE_API_CONF:
DEFAULT:
- enabled_backends: "cheap:file, robust:rbd"
+ enabled_backends: "cheap:file, robust:rbd, web:http"
default_log_levels: "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, oslo_policy=DEBUG"
glance_store:
default_backend: cheap
@@ -618,6 +645,8 @@
rbd_store_ceph_conf: /etc/ceph/ceph.conf
cheap:
filesystem_store_datadir: /opt/stack/data/glance/images/
+ web:
+ https_insecure: false
os_glance_staging_store:
filesystem_store_datadir: /opt/stack/data/glance/os_glance_staging_store/
os_glance_tasks_store:
@@ -628,14 +657,53 @@
image_conversion:
output_format: raw
+# TODO(gmann): As per the 2023.1 testing runtime, we need to run at least
+# one job on Focal. This job can be removed as per the future testing
+# runtime (whenever we drop the Ubuntu Focal testing).
+- job:
+ name: tempest-integrated-compute-ubuntu-focal
+ description: This is integrated compute job testing on Ubuntu Focal(20.04)
+ parent: tempest-integrated-compute
+ nodeset: openstack-single-node-focal
+
+# TODO(gmann): Remove this job once all the required services for the
+# integrated compute gate (Cinder, Glance, Neutron) enable scope and new
+# defaults by default, which means all the nova jobs will be tested with the
+# new RBAC in an integrated way and we will not need this separate job.
+- job:
+ name: tempest-integrated-compute-enforce-scope-new-defaults
+ parent: tempest-integrated-compute
+ description: |
+ This job runs the Tempest tests with scope and new defaults enabled
+ for Nova, Neutron, Glance, and Cinder services.
+ # TODO (gmann): There were a few fixes in neutron and neutron-lib for the
+ # RBAC but they are not yet released, so we need to add both projects as
+ # required-projects. Those can be removed once new versions of neutron
+ # and neutron-lib are released.
+ required-projects:
+ - openstack/neutron
+ - openstack/neutron-lib
+ vars:
+ devstack_localrc:
+ # Enabling scope and new defaults for the services that implement them.
+ # NOTE (gmann): We need to keep the keystone scope check disabled as
+ # services (except ironic) do not support the system scope and they
+ # need keystone to continue working with project scope. Until Keystone
+ # policies are changed to also work with project scope, we need to
+ # keep the scope check disabled for keystone.
+ NOVA_ENFORCE_SCOPE: true
+ CINDER_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
- project:
# Please try to keep the list of job names sorted alphabetically.
templates:
- check-requirements
- integrated-gate-compute
- openstack-cover-jobs
- - openstack-python3-zed-jobs
- - openstack-python3-zed-jobs-arm64
+ - openstack-python3-jobs
+ - openstack-python3-jobs-arm64
- periodic-stable-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
@@ -664,8 +732,7 @@
voting: false
- nova-tox-functional-py38
- nova-tox-functional-py39
- - nova-tox-functional-py310:
- voting: false
+ - nova-tox-functional-py310
- tempest-integrated-compute:
# NOTE(gmann): Policies changes do not need to run all the
# integration test jobs. Running only tempest and grenade
@@ -685,6 +752,10 @@
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
+ - tempest-integrated-compute-ubuntu-focal:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
- grenade-skip-level:
irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
@@ -718,6 +789,10 @@
- ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
- tempest-integrated-compute:
irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-ubuntu-focal:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
@@ -729,8 +804,7 @@
# Runs emulation feature functionality test less frequently due
# to being the initial release and experimental in nature.
- nova-emulation
- - tempest-integrated-compute-centos-9-stream:
- irrelevant-files: *nova-base-irrelevant-files
+ - tempest-centos9-stream-fips
experimental:
jobs:
- ironic-tempest-bfv:
@@ -760,12 +834,8 @@
irrelevant-files: *nova-base-irrelevant-files
- devstack-tobiko-nova:
irrelevant-files: *nova-base-irrelevant-files
- - tempest-centos8-stream-fips:
+ - tempest-centos9-stream-fips:
irrelevant-files: *nova-base-irrelevant-files
- nova-emulation
- tempest-integrated-compute-centos-9-stream:
irrelevant-files: *nova-base-irrelevant-files
- periodic:
- jobs:
- - tempest-centos8-stream-fips:
- branches: master
diff --git a/HACKING.rst b/HACKING.rst
index 0f98901864..c5a1ba4ae3 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -71,6 +71,12 @@ Nova Specific Commandments
- [N367] Disallow aliasing the mock.Mock and similar classes in tests.
- [N368] Reject if the mock.Mock class is used as a replacement value instead of an
instance of a mock.Mock during patching in tests.
+- [N369] oslo_concurrency.lockutils.ReaderWriterLock() or
+ fasteners.ReaderWriterLock() does not function correctly
+ with eventlet patched code. Use nova.utils.ReaderWriterLock() instead.
+- [N370] Don't use or import six
+- [N371] You must explicitly import python's mock: ``from unittest import mock``
+- [N372] Don't use the setDaemon method. Use the daemon attribute instead.
Creating Unit Tests
-------------------
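A minimal sketch of the usage pattern the new N369 check steers towards,
assuming nova is importable and that nova.utils.ReaderWriterLock keeps the
usual fasteners read_lock()/write_lock() context managers:

    # Hedged sketch: prefer nova's eventlet-safe wrapper over the raw
    # fasteners/oslo_concurrency primitives that N369 flags.
    from nova import utils

    lock = utils.ReaderWriterLock()  # instead of fasteners.ReaderWriterLock()

    with lock.write_lock():
        pass  # mutate shared state under the exclusive writer lock

    with lock.read_lock():
        pass  # concurrent readers may hold this at the same time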
diff --git a/api-guide/source/accelerator-support.rst b/api-guide/source/accelerator-support.rst
index c71e899fd4..9d1b4d77b4 100644
--- a/api-guide/source/accelerator-support.rst
+++ b/api-guide/source/accelerator-support.rst
@@ -12,7 +12,7 @@ appropriate privileges) must do the following:
* Create a device profile in Cyborg, which specifies what accelerator
resources need to be provisioned. (See `Cyborg device profiles API`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
* Set the device profile name as an extra spec in a chosen flavor,
with this syntax:
@@ -102,7 +102,7 @@ appropriate privileges) must do the following:
resources need to be provisioned. (See `Cyborg device profiles API`_,
`Cyborg SRIOV Test Report`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
.. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic
* create a 'accelerator-direct' vnic type port with the device-profile name
diff --git a/api-guide/source/server_concepts.rst b/api-guide/source/server_concepts.rst
index f6d07a99d0..9341965140 100644
--- a/api-guide/source/server_concepts.rst
+++ b/api-guide/source/server_concepts.rst
@@ -1048,7 +1048,7 @@ Nova is able to write metadata to a special configuration drive that attaches
to the server when it boots. The server can mount this drive and read files
from it to get information that is normally available through the metadata
service. For more details, refer to the :nova-doc:`user guide
-<user/metadata.html>`.
+<user/metadata.html#config-drives>`.
User data
---------
diff --git a/api-guide/source/users.rst b/api-guide/source/users.rst
index a0b74374a2..28a59201c0 100644
--- a/api-guide/source/users.rst
+++ b/api-guide/source/users.rst
@@ -28,7 +28,7 @@ The Compute API uses these roles, along with oslo.policy, to decide
what the user is authorized to do.
Refer to the to
-:nova-doc:`compute admin guide </admin/arch#projects-users-and-roles>`
+:nova-doc:`compute admin guide </admin/architecture#projects-users-and-roles>`
for details.
Personas used in this guide
diff --git a/api-ref/source/flavors.inc b/api-ref/source/flavors.inc
index 0216ce2983..52577667ec 100644
--- a/api-ref/source/flavors.inc
+++ b/api-ref/source/flavors.inc
@@ -60,6 +60,15 @@ Creates a flavor.
Creating a flavor is typically only available to administrators of a
cloud because this has implications for scheduling efficiently in the cloud.
+.. note::
+ Flavor IDs may only contain alphanumeric characters plus the hyphen '-',
+ underscore '_', spaces and dots '.'; other special characters are not
+ permitted.
+
+ Flavor IDs are meant to be UUIDs: strings of hex groups separated by "-",
+ e.g. 01cc74d8-4816-4bef-835b-e36ff188c406, which is also what is generated
+ by default when no ID is provided.
+
+ An integer flavor ID is permitted only for backward compatibility.
+
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
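For illustration only, a hedged sketch of a flavor-create request that uses an
explicit string ID built from the characters allowed above; the endpoint,
token and values are placeholders:

    # Hypothetical values; only the request shape follows the flavors API.
    import requests

    body = {
        "flavor": {
            "name": "example.small",
            "id": "example-small_v1.0",  # hyphen, underscore and dot allowed
            "ram": 2048,    # MiB
            "vcpus": 2,
            "disk": 20,     # GiB
        }
    }
    resp = requests.post(
        "https://nova.example.com/v2.1/flavors",  # placeholder endpoint
        json=body,
        headers={"X-Auth-Token": "<token>"},
    )
    resp.raise_for_status()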
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 63f0f58963..e185dce29d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -3129,8 +3129,9 @@ flavor_id_body_2_46:
max_version: 2.46
flavor_id_body_create:
description: |
- The ID of the flavor. While people often make this look like an int, this
- is really a string. If not provided, this defaults to a uuid.
+ Only alphanumeric characters with hyphen '-', underscore '_', spaces
+ and dots '.' are permitted. If an ID is not provided, then a default UUID
+ will be assigned.
in: body
required: false
type: string
@@ -4019,14 +4020,15 @@ imageRef:
type: string
imageRef_rebuild:
description: |
- The UUID of the image to rebuild for your server instance.
- It must be a valid UUID otherwise API will return 400.
- If rebuilding a volume-backed server with a new image
- (an image different from the image used when creating the volume),
- the API will return 400.
- For non-volume-backed servers, specifying a new image will result
- in validating that the image is acceptable for the current compute host
- on which the server exists. If the new image is not valid,
+ The UUID of the image to rebuild for your server instance. It
+ must be a valid UUID, otherwise the API will return 400. To rebuild
+ a volume-backed server with a new image, at least microversion 2.93
+ must be used in the request, otherwise the request falls back to
+ the old behaviour, i.e. the API returns 400 if the image differs
+ from the image used when creating the volume. For
+ non-volume-backed servers, specifying a new image will result in
+ validating that the image is acceptable for the current compute
+ host on which the server exists. If the new image is not valid,
the server will go into ``ERROR`` status.
in: body
required: true
@@ -6380,6 +6382,9 @@ server_hostname_req:
description: |
The hostname to configure for the instance in the metadata service.
+ Starting with microversion 2.94, this can be a Fully Qualified Domain Name
+ (FQDN) of up to 255 characters in length.
+
.. note::
This information is published via the metadata service and requires
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index f480403a40..bb9953afa0 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -540,7 +540,13 @@ Rebuilds a server.
Specify the ``rebuild`` action in the request body.
This operation recreates the root disk of the server.
-For a volume-backed server, this operation keeps the contents of the volume.
+
+Starting with microversion 2.93, rebuilding volume-backed
+instances is supported and reimages the volume with the provided
+image. For microversion < 2.93, this operation keeps the
+contents of the volume only if the image provided is the same as
+the image with which the volume was created; otherwise the
+operation errors out.
**Preconditions**
@@ -552,8 +558,10 @@ If the server was in status ``SHUTOFF`` before the rebuild, it will be stopped
and in status ``SHUTOFF`` after the rebuild, otherwise it will be ``ACTIVE``
if the rebuild was successful or ``ERROR`` if the rebuild failed.
-.. note:: There is a `known limitation`_ where the root disk is not
- replaced for volume-backed instances during a rebuild.
+.. note:: With microversion 2.93, we support rebuilding volume-backed
+ instances. If any microversion < 2.93 is specified, there is a
+ `known limitation`_ where the root disk is not replaced for
+ volume-backed instances during a rebuild.
.. _known limitation: https://bugs.launchpad.net/nova/+bug/1482040
@@ -596,6 +604,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json
:language: javascript
+**Example Rebuild Server (rebuild Action) (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-action-rebuild.json
+ :language: javascript
+
Response
--------
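As a hedged illustration of the microversion gate described above (endpoint,
token and UUIDs are placeholders):

    # Rebuild a volume-backed server with a different image; this needs
    # microversion >= 2.93, otherwise the API keeps the old 400 behaviour.
    import requests

    server = "<server-uuid>"
    image = "<image-uuid>"

    resp = requests.post(
        f"https://nova.example.com/v2.1/servers/{server}/action",
        json={"rebuild": {"imageRef": image}},
        headers={
            "X-Auth-Token": "<token>",
            "OpenStack-API-Version": "compute 2.93",
        },
    )
    resp.raise_for_status()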
diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc
index 547a71e914..e72d0641b9 100644
--- a/api-ref/source/servers.inc
+++ b/api-ref/source/servers.inc
@@ -448,6 +448,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
:language: javascript
+**Example Create Server With FQDN in Hostname (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-create-req.json
+ :language: javascript
+
Response
--------
@@ -610,7 +615,7 @@ Response
.. rest_parameters:: parameters.yaml
- - server: server
+ - servers: servers
- accessIPv4: accessIPv4
- accessIPv6: accessIPv6
- addresses: addresses
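Similarly, a hedged sketch of a create request that sets an FQDN hostname via
microversion 2.94 (endpoint, token and reference IDs are placeholders):

    # The "hostname" field accepts an FQDN starting with microversion 2.94.
    import requests

    body = {
        "server": {
            "name": "fqdn-demo",
            "hostname": "guest01.example.com",  # FQDN allowed from 2.94
            "imageRef": "<image-uuid>",
            "flavorRef": "<flavor-id>",
            "networks": "auto",
        }
    }
    resp = requests.post(
        "https://nova.example.com/v2.1/servers",  # placeholder endpoint
        json=body,
        headers={
            "X-Auth-Token": "<token>",
            "OpenStack-API-Version": "compute 2.94",
        },
    )
    resp.raise_for_status()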
diff --git a/devstack/nova-multi-cell-exclude-list.txt b/devstack/nova-multi-cell-exclude-list.txt
index a61229c906..0dbe383abf 100644
--- a/devstack/nova-multi-cell-exclude-list.txt
+++ b/devstack/nova-multi-cell-exclude-list.txt
@@ -10,3 +10,7 @@
# https://bugs.launchpad.net/nova/+bug/1907511 for details
test_migrate_with_qos_min_bw_allocation
test_resize_with_qos_min_bw_allocation
+
+# Also exclude unshelve to specific host test cases as unshelve cannot move VMs across cells
+# See https://bugs.launchpad.net/nova/+bug/1988316
+tempest.api.compute.admin.test_servers_on_multinodes.UnshelveToHostMultiNodesTest
diff --git a/doc/api_samples/images/images-details-get-resp.json b/doc/api_samples/images/images-details-get-resp.json
index 034c35f0c0..33cf667287 100644
--- a/doc/api_samples/images/images-details-get-resp.json
+++ b/doc/api_samples/images/images-details-get-resp.json
@@ -1,59 +1,56 @@
{
"images": [
{
- "OS-DCF:diskConfig": "AUTO",
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "25165824",
"created": "2011-01-01T01:02:03Z",
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
"architecture": "x86_64",
- "auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
- "name": "fakeimage7",
+ "name": "fakeimage123456",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "58145823",
"created": "2011-01-01T01:02:03Z",
- "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
- "architecture": "x86_64",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
@@ -65,25 +62,26 @@
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "83594576",
"created": "2011-01-01T01:02:03Z",
- "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
+ "architecture": "x86_64",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
@@ -95,40 +93,37 @@
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-DCF:diskConfig": "MANUAL",
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "84035174",
"created": "2011-01-01T01:02:03Z",
- "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
- "architecture": "x86_64",
- "auto_disk_config": "False",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
- "name": "fakeimage6",
+ "name": "fakeimage123456",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "26360814",
"created": "2011-01-01T01:02:03Z",
"id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
"links": [
@@ -158,65 +153,188 @@
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-DCF:diskConfig": "MANUAL",
+ "OS-EXT-IMG-SIZE:size": "49163826",
"created": "2011-01-01T01:02:03Z",
- "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
- "name": "fakeimage123456",
+ "name": "fakeimage6",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
},
{
+ "OS-DCF:diskConfig": "AUTO",
"OS-EXT-IMG-SIZE:size": "74185822",
"created": "2011-01-01T01:02:03Z",
- "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
"architecture": "x86_64",
+ "auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "95fad737-9325-4855-b37e-20a62268ec88",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/95fad737-9325-4855-b37e-20a62268ec88",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/95fad737-9325-4855-b37e-20a62268ec88",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/95fad737-9325-4855-b37e-20a62268ec88",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "535426d4-5d75-44f4-9591-a2123d23c33f",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/535426d4-5d75-44f4-9591-a2123d23c33f",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/535426d4-5d75-44f4-9591-a2123d23c33f",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/535426d4-5d75-44f4-9591-a2123d23c33f",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "False"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "luks"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "261b52ed-f693-4147-8f3b-d25df5efd968",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/261b52ed-f693-4147-8f3b-d25df5efd968",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/261b52ed-f693-4147-8f3b-d25df5efd968",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/261b52ed-f693-4147-8f3b-d25df5efd968",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "plain"
+ },
+ "minDisk": 0,
+ "minRam": 0,
"name": "fakeimage123456",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
}
]
-}
+}
\ No newline at end of file
diff --git a/doc/api_samples/images/images-list-get-resp.json b/doc/api_samples/images/images-list-get-resp.json
index 00d06f96b3..e2207b9271 100644
--- a/doc/api_samples/images/images-list-get-resp.json
+++ b/doc/api_samples/images/images-list-get-resp.json
@@ -1,37 +1,37 @@
{
"images": [
{
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
- "name": "fakeimage7"
+ "name": "fakeimage123456"
},
{
- "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -39,18 +39,56 @@
"name": "fakeimage123456"
},
{
- "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -77,18 +115,37 @@
"name": "fakeimage6"
},
{
- "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage7"
+ },
+ {
+ "id": "a2293931-dc33-45cc-85ef-232aa9491710",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2293931-dc33-45cc-85ef-232aa9491710",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2293931-dc33-45cc-85ef-232aa9491710",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/a2293931-dc33-45cc-85ef-232aa9491710",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -96,18 +153,18 @@
"name": "fakeimage123456"
},
{
- "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "id": "e78f0ee9-96ef-4ce7-accf-e816f273be45",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/e78f0ee9-96ef-4ce7-accf-e816f273be45",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/e78f0ee9-96ef-4ce7-accf-e816f273be45",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://glance.openstack.example.com/images/e78f0ee9-96ef-4ce7-accf-e816f273be45",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -115,18 +172,37 @@
"name": "fakeimage123456"
},
{
- "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "id": "54eadb78-eeb6-4b13-beed-20b9894eeadf",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/54eadb78-eeb6-4b13-beed-20b9894eeadf",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/54eadb78-eeb6-4b13-beed-20b9894eeadf",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://glance.openstack.example.com/images/54eadb78-eeb6-4b13-beed-20b9894eeadf",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "eb7458f3-d003-4187-8027-595591dc2723",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/eb7458f3-d003-4187-8027-595591dc2723",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/eb7458f3-d003-4187-8027-595591dc2723",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/eb7458f3-d003-4187-8027-595591dc2723",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -134,4 +210,4 @@
"name": "fakeimage123456"
}
]
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..8ad929226e
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
@@ -0,0 +1,4 @@
+{
+ "evacuate": {
+ }
+}
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
new file mode 100644
index 0000000000..d192892cdc
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "host": "testHost"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
new file mode 100644
index 0000000000..7eeb568ea4
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild.json b/doc/api_samples/servers/v2.94/server-action-rebuild.json
new file mode 100644
index 0000000000..b5401ad9ca
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild.json
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-req.json b/doc/api_samples/servers/v2.94/server-create-req.json
new file mode 100644
index 0000000000..c6d4ce5640
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-req.json
@@ -0,0 +1,30 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg=="
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-resp.json b/doc/api_samples/servers/v2.94/server-create-resp.json
new file mode 100644
index 0000000000..f50e29dd8b
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-get-resp.json b/doc/api_samples/servers/v2.94/server-get-resp.json
new file mode 100644
index 0000000000..0a05b2f917
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-get-resp.json
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-req.json b/doc/api_samples/servers/v2.94/server-update-req.json
new file mode 100644
index 0000000000..1743f05fc7
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname" : "new-server-hostname.example.com"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-resp.json b/doc/api_samples/servers/v2.94/server-update-resp.json
new file mode 100644
index 0000000000..4aa834f9ec
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-resp.json
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/servers-details-resp.json b/doc/api_samples/servers/v2.94/servers-details-resp.json
new file mode 100644
index 0000000000..54b63fa523
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-details-resp.json
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.94/servers-list-resp.json b/doc/api_samples/servers/v2.94/servers-list-resp.json
new file mode 100644
index 0000000000..742d54b170
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 6e98517b61..3f285e6017 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.92",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 5fdd20ae61..749fd4674f 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.92",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/ext/extra_specs.py b/doc/ext/extra_specs.py
index 534f5fa969..ddd233d503 100644
--- a/doc/ext/extra_specs.py
+++ b/doc/ext/extra_specs.py
@@ -103,7 +103,7 @@ def _format_validator_help(
validator: base.ExtraSpecValidator,
summary: bool,
):
- """Generate reStucturedText snippets for the provided validator.
+ """Generate reStructuredText snippets for the provided validator.
:param validator: A validator to document.
:type validator: nova.api.validation.extra_specs.base.ExtraSpecValidator
diff --git a/doc/ext/feature_matrix.py b/doc/ext/feature_matrix.py
index 2bb773a657..31725e311e 100644
--- a/doc/ext/feature_matrix.py
+++ b/doc/ext/feature_matrix.py
@@ -69,10 +69,10 @@ class MatrixImplementation(object):
STATUS_COMPLETE = "complete"
STATUS_PARTIAL = "partial"
STATUS_MISSING = "missing"
- STATUS_UKNOWN = "unknown"
+ STATUS_UNKNOWN = "unknown"
STATUS_ALL = [STATUS_COMPLETE, STATUS_PARTIAL, STATUS_MISSING,
- STATUS_UKNOWN]
+ STATUS_UNKNOWN]
def __init__(self, status=STATUS_MISSING, notes=None, release=None):
"""MatrixImplementation models a cell in the matrix
@@ -394,7 +394,7 @@ class FeatureMatrixDirective(rst.Directive):
impl_status = u"\u2716"
elif impl.status == MatrixImplementation.STATUS_PARTIAL:
impl_status = u"\u2714"
- elif impl.status == MatrixImplementation.STATUS_UKNOWN:
+ elif impl.status == MatrixImplementation.STATUS_UNKNOWN:
impl_status = u"?"
implref.append(nodes.literal(
diff --git a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
index d1adfcc427..cdde7d3097 100644
--- a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
+++ b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
@@ -4,5 +4,5 @@
"hw_architecture": "x86_64"
},
"nova_object.name": "ImageMetaPropsPayload",
- "nova_object.version": "1.9"
+ "nova_object.version": "1.12"
}
diff --git a/doc/source/admin/architecture.rst b/doc/source/admin/architecture.rst
index 69130122f7..f5e2b90dd9 100644
--- a/doc/source/admin/architecture.rst
+++ b/doc/source/admin/architecture.rst
@@ -173,7 +173,7 @@ is possible to configure other filesystem types.
.. rubric:: Cinder-provisioned block storage
-The OpenStack Block Storage service, Cinder, provides persistent volumes hat
+The OpenStack Block Storage service, Cinder, provides persistent volumes that
are represented by a persistent virtualized block device independent of any
particular instance.
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index ffe1be06f9..aff8a0ab31 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -118,11 +118,47 @@ Implications for moving servers
There are several ways to move a server to another host: evacuate, resize,
cold migrate, live migrate, and unshelve. Move operations typically go through
-the scheduler to pick the target host *unless* a target host is specified and
-the request forces the server to that host by bypassing the scheduler. Only
-evacuate and live migrate can forcefully bypass the scheduler and move a
-server to a specified host and even then it is highly recommended to *not*
-force and bypass the scheduler.
+the scheduler to pick the target host.
+
+Prior to API microversion 2.68, using an older openstackclient (pre-5.5.0) or
+novaclient, it was possible to specify a target host so that the request
+forced the server to that host by bypassing the scheduler. Only evacuate and
+live migrate can forcefully bypass the scheduler and move a server to a
+specified host, and even then it is highly recommended *not* to force and
+bypass the scheduler.
+
+- live migrate with force host (works with older openstackclients (pre-5.5.0)):
+
+.. code-block:: console
+
+ $ openstack server migrate --live <host> <server>
+
+- live migrate without forcing:
+
+.. code-block:: console
+
+ $ openstack server migrate --live-migration --host <host> <server>
+
+Support for the ``server evacuate`` command was added to openstackclient in
+5.5.3, and it never exposed the ability to force an evacuation, but this was
+previously possible with novaclient.
+
+- evacuate with force host:
+
+.. code-block:: console
+
+ $ nova evacuate --force <server> <host>
+
+- evacuate without forcing using novaclient:
+
+.. code-block:: console
+
+ $ nova evacuate <server>
+
+- evacuate without forcing using openstackclient:
+
+.. code-block:: console
+
+ $ openstack server evacuate --host <host> <server>
With respect to availability zones, a server is restricted to a zone if:
@@ -150,16 +186,6 @@ If the server was not created in a specific zone then it is free to be moved
to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
is a no-op.
-Knowing this, it is dangerous to force a server to another host with evacuate
-or live migrate if the server is restricted to a zone and is then forced to
-move to a host in another zone, because that will create an inconsistency in
-the internal tracking of where that server should live and may require manually
-updating the database for that server. For example, if a user creates a server
-in zone A and then the admin force live migrates the server to zone B, and then
-the user resizes the server, the scheduler will try to move it back to zone A
-which may or may not work, e.g. if the admin deleted or renamed zone A in the
-interim.
-
Resource affinity
~~~~~~~~~~~~~~~~~
diff --git a/doc/source/admin/cells.rst b/doc/source/admin/cells.rst
index bad3566bd9..bb83e967f3 100644
--- a/doc/source/admin/cells.rst
+++ b/doc/source/admin/cells.rst
@@ -600,7 +600,7 @@ of ``rabbit://bob:s3kret@otherhost/nova`` when used with the above example.
The :oslo.config:option:`transport_url` option can contain an
extended syntax for the "netloc" part of the URL
(i.e. ``userA:passwordA@hostA:portA,userB:passwordB:hostB:portB``). In this
- case, substitions of the form ``username1``, ``username2``, etc will be
+ case, substitutions of the form ``username1``, ``username2``, etc will be
honored and can be used in the template URL.
The templating of these URLs may be helpful in order to provide each service host
diff --git a/doc/source/admin/compute-node-identification.rst b/doc/source/admin/compute-node-identification.rst
new file mode 100644
index 0000000000..31d4802d0b
--- /dev/null
+++ b/doc/source/admin/compute-node-identification.rst
@@ -0,0 +1,83 @@
+===========================
+Compute Node Identification
+===========================
+
+Nova requires that compute nodes maintain a constant and consistent identity
+during their lifecycle. With the exception of the ironic driver, starting in
+the 2023.1 release, this is achieved by use of a file containing the node
+unique identifier that is persisted on disk. Prior to 2023.1, a combination of
+the compute node's hostname and the :oslo.config:option:`host` value in the
+configuration file were used.
+
+The 2023.1 and later compute node identification file must remain unchanged
+during the lifecycle of the compute node. Changing the value or removing the
+file will result in a failure to start and may require advanced techniques
+for recovery. The file is read once at ``nova-compute`` startup, at which point
+it is validated for formatting and the corresponding node is located or
+created in the database.
+
+.. note::
+
+ Even after 2023.1, the compute node's hostname may not be changed after
+ the initial registration with the controller nodes; it is just not used
+ as the primary method for identification.
+
+The behavior of ``nova-compute`` is different when using the ironic driver,
+as the (UUID-based) identity and mapping of compute nodes to compute manager
+service hosts is dynamic. In that case, no single node identity is maintained
+by the compute host, and no identity file is read or written. Thus, none
+of the sections below apply to hosts with :oslo.config:option:`compute_driver`
+set to ``ironic``.
+
+Self-provisioning of the node identity
+--------------------------------------
+
+By default, ``nova-compute`` will automatically generate and write a UUID to
+disk the first time it starts up, and will use that going forward as its
+stable identity. Using the :oslo.config:option:`state_path`
+(which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be
+created with a generated UUID.
+
+Since this file (and its parent directory) is writable by nova, it may be
+desirable to move this to one of the other locations in which nova looks for
+the identification file.
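+
+As an illustration, assuming the default ``state_path`` of ``/var/lib/nova``,
+the generated identity can be inspected with:
+
+.. code-block:: shell
+
+ $ cat /var/lib/nova/compute_id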
+
+Deployment provisioning of the node identity
+--------------------------------------------
+
+In addition to the location mentioned above, nova will also search the parent
+directories of any config file in use (either the defaults or provided on
+the command line) for a ``compute_id`` file. Thus, a deployment tool may, on
+most systems, pre-provision the node's UUID by writing one to
+``/etc/nova/compute_id``.
+
+The contents of the file should be a single UUID in canonical textual
+representation with no additional whitespace or other characters. The following
+should work on most Linux systems:
+
+.. code-block:: shell
+
+ $ uuidgen > /etc/nova/compute_id
+
+.. note::
+
+ **Do not** execute the above command blindly in every run of a deployment
+ tool, as that will result in overwriting the ``compute_id`` file each time,
+ which *will* prevent nova from working properly.
+
+Upgrading from pre-2023.1
+-------------------------
+
+Before release 2023.1, ``nova-compute`` only used the hostname (combined with
+:oslo.config:option:`host`, if set) to identify its compute node objects in
+the database. When upgrading from a prior release, the compute node will
+perform a one-time migration of the hostname-matched compute node UUID to the
+``compute_id`` file in the :oslo.config:option:`state_path` location.
+
+.. note::
+
+ It is imperative that you allow the above migration to run and complete on
+ compute nodes that are being upgraded. Skipping this step by
+ pre-provisioning a ``compute_id`` file before the upgrade will **not** work
+ and will be equivalent to changing the compute node UUID after it has
+ already been created once.
diff --git a/doc/source/admin/configuration/hypervisor-hyper-v.rst b/doc/source/admin/configuration/hypervisor-hyper-v.rst
index 969a0c13b3..8ce9c2ebb4 100644
--- a/doc/source/admin/configuration/hypervisor-hyper-v.rst
+++ b/doc/source/admin/configuration/hypervisor-hyper-v.rst
@@ -244,7 +244,7 @@ The following packages must be installed with pip:
* ``pywin32``
* ``pymysql``
* ``greenlet``
-* ``pycryto``
+* ``pycrypto``
* ``ecdsa``
* ``amqp``
* ``wmi``
diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst
index 529542d805..082c88f655 100644
--- a/doc/source/admin/cpu-topologies.rst
+++ b/doc/source/admin/cpu-topologies.rst
@@ -129,9 +129,9 @@ Sorts are performed on host's NUMA nodes list in the following order:
Top sorting priority is for host's NUMA nodes with PCI devices attached. If VM
requested PCI device(s) logic **always** puts host's NUMA nodes with more PCI
-devices at the beginnig of the host's NUMA nodes list. If PCI devices isn't
+devices at the beginning of the host's NUMA nodes list. If PCI devices isn't
requested by VM than NUMA nodes with no (or less) PCI device available will be
-placed at the beginnig of the list.
+placed at the beginning of the list.
.. caution::
@@ -730,6 +730,97 @@ CPU policy, meanwhile, will consume ``VCPU`` inventory.
.. _configure-hyperv-numa:
+Configuring CPU power management for dedicated cores
+----------------------------------------------------
+
+.. versionchanged:: 27.0.0
+
+ This feature was only introduced by the 2023.1 Antelope release
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt/KVM driver.
+
+For power saving reasons, operators can decide to turn down the power usage of
+CPU cores whether they are in use or not. For obvious reasons, Nova only
+allows changing the power consumption of a dedicated CPU core, not a shared
+one. Accordingly, this feature relies on reading the
+:oslo.config:option:`compute.cpu_dedicated_set` config option to know which
+CPU cores to handle.
+The main action to enable the power management of dedicated cores is to set
+:oslo.config:option:`libvirt.cpu_power_management` config option to ``True``.
+
+By default, if this option is enabled, Nova will look up the dedicated cores
+and power them down at compute service startup. Then, once an instance is
+started and pinned to a dedicated core, that core will be powered up right
+before the libvirt guest starts. Conversely, once an instance is stopped,
+migrated or deleted, the corresponding dedicated core will be powered down.
+
+There are two distinct strategies for powering cores up or down:
+
+- the default is to offline the CPU core and online it when needed.
+- an alternative strategy is to use two distinct CPU governors for the up state
+ and the down state.
+
+The strategy can be chosen using the
+:oslo.config:option:`libvirt.cpu_power_management_strategy` config option.
+``cpu_state`` supports the first online/offline strategy, while ``governor``
+sets the alternative strategy.
+We default to turning off the cores as it provides the best power savings,
+while other tools outside Nova, such as tuned, could manage the governor
+instead. That being said, we also provide a way to automatically change the
+governors on the fly, as explained below.
+
+If the strategy is set to ``governor``, a couple of config options are provided
+to define which exact CPU governor to use for each of the up and down states:
+
+- :oslo.config:option:`libvirt.cpu_power_governor_low` will define the governor
+ to use for the powerdown state (defaults to ``powersave``)
+- :oslo.config:option:`libvirt.cpu_power_governor_high` will define the
+ governor to use for the powerup state (defaults to ``performance``)
+
+.. important::
+ It is the responsibility of the operator to ensure that the governors
+ defined by the configuration options are supported by the underlying OS
+ kernel that runs the compute service.
+
+ As a side note, we recommend the ``schedutil`` governor as an alternative for
+ the high-power state (if the kernel supports it) as the CPU frequency is
+ dynamically set based on CPU task states. Other governors may be worth
+ testing, including ``conservative`` and ``ondemand``, which consume quite a
+ bit more power than ``schedutil`` but are more efficient than
+ ``performance``. See `Linux kernel docs`_ for further explanations.
+
+.. _`Linux kernel docs`: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+As an example, the relevant part of a ``nova.conf`` configuration would look like::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=cpu_state
+
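+If the ``governor`` strategy is preferred instead, the configuration could
+look like the following sketch (the governor names shown are simply the
+documented defaults and must be supported by the underlying kernel)::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=governor
+ cpu_power_governor_low=powersave
+ cpu_power_governor_high=performance
+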
+.. warning::
+
+ CPU core #0 has a special meaning in most recent Linux kernels. Using it
+ for CPU pinning is always highly discouraged, but please also refrain from
+ having it power managed, or you could be in for surprises if Nova turns it
+ off!
+
+One last important note: you may decide to change the CPU management strategy
+during the compute lifecycle, or you may already be managing the CPU states
+yourself. To ensure that Nova can correctly manage CPU performance, a couple
+of checks were added at startup that refuse to start the nova-compute service
+if the following rules aren't satisfied:
+
+- if the operator opts for the ``cpu_state`` strategy, then all dedicated CPU
+ governors *MUST* be identical.
+- if they decide to use ``governor``, then all dedicated CPU cores *MUST* be
+ online.
+
Configuring Hyper-V compute nodes for instance NUMA policies
------------------------------------------------------------
diff --git a/doc/source/admin/evacuate.rst b/doc/source/admin/evacuate.rst
index ef9eccd931..18796d9c23 100644
--- a/doc/source/admin/evacuate.rst
+++ b/doc/source/admin/evacuate.rst
@@ -97,3 +97,17 @@ instances up and running.
using a pattern you might want to use the ``--strict`` flag which got introduced
in version 10.2.0 to make sure nova matches the ``FAILED_HOST``
exactly.
+
+.. note::
+ .. code-block:: bash
+
+ +------+--------+--------------+
+ | Name | Status | Task State |
+ +------+--------+--------------+
+ | vm_1 | ACTIVE | powering-off |
+ +------+--------+--------------+
+
+ If the instance task state is not None, evacuation will be possible. However,
+ depending on the ongoing operation, there may be clean up required in other
+ services which the instance was using, such as neutron, cinder, glance, or
+ the storage backend. \ No newline at end of file
diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst
index 73f6c5dd2d..a451c6e3ab 100644
--- a/doc/source/admin/huge-pages.rst
+++ b/doc/source/admin/huge-pages.rst
@@ -96,7 +96,7 @@ pages at boot time, run:
.. code-block:: console
- # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' > /etc/default/grub
+ # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub
$ grep GRUB_CMDLINE_LINUX /etc/default/grub
GRUB_CMDLINE_LINUX="..."
GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 6b031ba968..8cb5bf7156 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -99,7 +99,7 @@ the defaults from the :doc:`install guide </install/index>` will be sufficient.
cells allow sharding of your compute environment. Upfront planning is key to
a successful cells v2 layout.
-* :doc:`Availablity Zones </admin/availability-zones>`: Availability Zones are
+* :doc:`Availability Zones </admin/availability-zones>`: Availability Zones are
an end-user visible logical abstraction for partitioning a cloud without
knowing the physical infrastructure.
@@ -199,12 +199,14 @@ instance for these kind of workloads.
virtual-gpu
file-backed-memory
ports-with-resource-requests
+ vdpa
virtual-persistent-memory
emulated-tpm
uefi
secure-boot
sev
managing-resource-providers
+ compute-node-identification
resource-limits
cpu-models
libvirt-misc
@@ -229,3 +231,4 @@ Once you are running nova, the following information is extremely useful.
node-down
hw-machine-type
hw-emulation-architecture
+ soft-delete-shadow-tables
diff --git a/doc/source/admin/libvirt-misc.rst b/doc/source/admin/libvirt-misc.rst
index 87dbe18ea4..eb3d20b479 100644
--- a/doc/source/admin/libvirt-misc.rst
+++ b/doc/source/admin/libvirt-misc.rst
@@ -138,3 +138,33 @@ For example, to hide your signature from the guest OS, run:
.. code:: console
$ openstack flavor set $FLAVOR --property hw:hide_hypervisor_id=true
+
+
+.. _extra-spec-locked_memory:
+
+Locked memory allocation
+------------------------
+
+.. versionadded:: 26.0.0 (Zed)
+
+Locking memory marks the guest memory allocations as unmovable and
+unswappable. It is implicitly enabled in a number of cases such as SEV or
+realtime guests but can also be enabled explicitly using the
+``hw:locked_memory`` extra spec (or the ``hw_locked_memory`` image property).
+Both ``hw:locked_memory`` and the ``hw_locked_memory`` image property accept
+boolean values in string format, such as 'true' or 'false'.
+A `FlavorImageLockedMemoryConflict` exception is raised if both the flavor
+extra spec and the image property are specified but with different boolean
+values.
+Locking memory is only allowed if you have also set ``hw:mem_page_size``,
+so we can ensure that the scheduler can actually account for this correctly
+and prevent out of memory events. Otherwise, a `LockMemoryForbidden`
+exception is raised.
+
+.. code:: console
+
+ $ openstack flavor set FLAVOR-NAME \
+ --property hw:locked_memory=BOOLEAN_VALUE
+
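+Since locked memory is only permitted together with an explicit page size, a
+complete flavor configuration could, for example, look like:
+
+.. code:: console
+
+ $ openstack flavor set FLAVOR-NAME \
+ --property hw:mem_page_size=large \
+ --property hw:locked_memory=true
+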
+.. note::
+
+ This is currently only supported by the libvirt driver.
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..32c67c2b0a 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -102,7 +102,7 @@ Manual selection of the destination host
.. code-block:: console
- $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live HostC
+ $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live-migration --host HostC
#. Confirm that the instance has been migrated successfully:
diff --git a/doc/source/admin/manage-logs.rst b/doc/source/admin/manage-logs.rst
index f60a523852..3a1546d8f4 100644
--- a/doc/source/admin/manage-logs.rst
+++ b/doc/source/admin/manage-logs.rst
@@ -181,12 +181,18 @@ websocket client to access the serial console.
.. rubric:: Accessing the serial console on an instance
-#. Use the :command:`nova get-serial-proxy` command to retrieve the websocket
+#. Use the :command:`nova get-serial-console` command to retrieve the websocket
URL for the serial console on the instance:
.. code-block:: console
- $ nova get-serial-proxy INSTANCE_NAME
+ $ nova get-serial-console INSTANCE_NAME
+
+ Or use the :command:`openstack console url show` command.
+
+ .. code-block:: console
+
+ $ openstack console url show --serial INSTANCE_NAME
.. list-table::
:header-rows: 0
diff --git a/doc/source/admin/manage-volumes.rst b/doc/source/admin/manage-volumes.rst
index a9d705a47a..ef45d2c7aa 100644
--- a/doc/source/admin/manage-volumes.rst
+++ b/doc/source/admin/manage-volumes.rst
@@ -188,9 +188,9 @@ fetched using the `volume_attachment get_connector` subcommand:
.. note::
- Future work will remove this requirement and incorperate the gathering of
- the host connector into the main refresh command. Unfortunatley until then
- it must remain a seperate manual step.
+ Future work will remove this requirement and incorporate the gathering of
+ the host connector into the main refresh command. Unfortunately until then
+ it must remain a separate manual step.
We can then provide this connector to the `volume_attachment refresh`
subcommand. This command will connect to the compute, disconnect any host
diff --git a/doc/source/admin/managing-resource-providers.rst b/doc/source/admin/managing-resource-providers.rst
index 27bfe20140..6e4fbc2703 100644
--- a/doc/source/admin/managing-resource-providers.rst
+++ b/doc/source/admin/managing-resource-providers.rst
@@ -158,7 +158,7 @@ Schema Example
items:
patternProperties:
# Allows any key name matching the resource class pattern,
- # check to prevent conflicts with virt driver owned resouces classes
+ # check to prevent conflicts with virt driver owned resources classes
# will be done after schema validation.
^[A-Z0-9_]{1,255}$:
type: object
diff --git a/doc/source/admin/networking.rst b/doc/source/admin/networking.rst
index 9005232cc3..c5b945b361 100644
--- a/doc/source/admin/networking.rst
+++ b/doc/source/admin/networking.rst
@@ -46,7 +46,7 @@ A full guide on configuring and using SR-IOV is provided in the
**Limitations**
* Only VFs are supported and they must be tagged in the Nova Compute
- configuration in the ``passthrough_whitelist`` option as
+ configuration in the :oslo.config:option:`pci.device_spec` option as
``remote_managed: "true"``. There is no auto-discovery of this based
on vendor and product IDs;
* Either VF or its respective PF must expose a PCI VPD capability with a
@@ -70,7 +70,7 @@ A full guide on configuring and using SR-IOV is provided in the
(see, for example, `this bug <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1957753>`_
which discusses a case relevant to one driver). As of Libvirt v8.1.0,
EPERM errors encountered while programming VLAN 0 are ignored if
- VLAN clearning is not explicitly requested in the device XML (i.e.
+ VLAN clearing is not explicitly requested in the device XML (i.e.
VLAN 0 is not specified explicitly).
NUMA Affinity
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index 5f131b325a..09a963603d 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -51,6 +51,24 @@ capabilities.
Nova will ignore PCI devices reported by the hypervisor if the address is
outside of these ranges.
+.. versionchanged:: 26.0.0 (Zed):
+ PCI passthrough device inventories now can be tracked in Placement.
+ For more information, refer to :ref:`pci-tracking-in-placement`.
+
+.. versionchanged:: 26.0.0 (Zed):
+ The nova-compute service will refuse to start if both the parent PF and its
+ children VFs are configured in :oslo.config:option:`pci.device_spec`.
+ For more information, refer to :ref:`pci-tracking-in-placement`.
+
+.. versionchanged:: 26.0.0 (Zed):
+ The nova-compute service will refuse to start with
+ :oslo.config:option:`pci.device_spec` configuration that uses the
+ ``devname`` field.
+
+.. versionchanged:: 27.0.0 (2023.1 Antelope):
+ Nova provides Placement based scheduling support for servers with flavor
+ based PCI requests. This support is disabled by default.
+
Enabling PCI passthrough
------------------------
@@ -92,15 +110,15 @@ Configure ``nova-compute``
Once PCI passthrough has been configured for the host, :program:`nova-compute`
must be configured to allow the PCI device to pass through to VMs. This is done
-using the :oslo.config:option:`pci.passthrough_whitelist` option. For example,
+using the :oslo.config:option:`pci.device_spec` option. For example,
assuming our sample PCI device has a PCI address of ``41:00.0`` on each host:
.. code-block:: ini
[pci]
- passthrough_whitelist = { "address": "0000:41:00.0" }
+ device_spec = { "address": "0000:41:00.0" }
-Refer to :oslo.config:option:`pci.passthrough_whitelist` for syntax information.
+Refer to :oslo.config:option:`pci.device_spec` for syntax information.
Alternatively, to enable passthrough of all devices with the same product and
vendor ID:
@@ -108,7 +126,7 @@ vendor ID:
.. code-block:: ini
[pci]
- passthrough_whitelist = { "vendor_id": "8086", "product_id": "154d" }
+ device_spec = { "vendor_id": "8086", "product_id": "154d" }
If using vendor and product IDs, all PCI devices matching the ``vendor_id`` and
``product_id`` are added to the pool of PCI devices available for passthrough
@@ -159,7 +177,7 @@ Once configured, restart the :program:`nova-compute` service.
Special Tags
^^^^^^^^^^^^
-When specified in :oslo.config:option:`pci.passthrough_whitelist` some tags
+When specified in :oslo.config:option:`pci.device_spec` some tags
have special meaning:
``physical_network``
@@ -173,6 +191,13 @@ have special meaning:
this tag can be used for remote-managed devices in conjunction with the
``remote_managed`` tag.
+.. note::
+
+ The use of ``"physical_network": null`` is only supported in single segment
+ networks. This is due to Nova not supporting multisegment networks for
+ SR-IOV ports. See
+ `bug 1983570 <https://bugs.launchpad.net/nova/+bug/1983570>`_ for details.
+
``remote_managed``
Used to specify whether a PCI device is managed remotely or not. By default,
devices are implicitly tagged as ``"remote_managed": "false"`` but and they
@@ -198,7 +223,7 @@ have special meaning:
result in an error depending on your driver and kernel version (see, for
example, `this bug <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1957753>`_
which discusses a case relevant to one driver). As of Libvirt v8.1.0, EPERM
- errors encountered while programming a VLAN are ignored if VLAN clearning is
+ errors encountered while programming a VLAN are ignored if VLAN clearing is
not explicitly requested in the device XML.
``trusted``
@@ -340,3 +365,177 @@ policy for any neutron SR-IOV interfaces attached by the user:
You can also configure this for PCI passthrough devices by specifying the
policy in the alias configuration via :oslo.config:option:`pci.alias`. For more
information, refer to :oslo.config:option:`the documentation <pci.alias>`.
+
+.. _pci-tracking-in-placement:
+
+PCI tracking in Placement
+-------------------------
+.. note::
+ The features described below are optional and disabled by default in nova
+ 26.0.0 (Zed). The legacy PCI tracker code path is still supported and
+ enabled. The Placement PCI tracking can be enabled via the
+ :oslo.config:option:`pci.report_in_placement` configuration. But please note
+ that once it is enabled on a given compute host it cannot be disabled there
+ any more.
+
+Since nova 26.0.0 (Zed) PCI passthrough device inventories are tracked in
+Placement. If a PCI device exists on the hypervisor and
+matches one of the device specifications configured via
+:oslo.config:option:`pci.device_spec` then Placement will have a representation
+of the device. Each PCI device of type ``type-PCI`` and ``type-PF`` will be
+modeled as a Placement resource provider (RP) with the name
+``<hypervisor_hostname>_<pci_address>``. A device with type ``type-VF`` is
+represented by its parent PCI device, the PF, as its resource provider.
+
+By default nova will use ``CUSTOM_PCI_<vendor_id>_<product_id>`` as the
+resource class in PCI inventories in Placement. However the name of the
+resource class can be customized via the ``resource_class`` tag in the
+:oslo.config:option:`pci.device_spec` option. There is also a new ``traits``
+tag in that configuration that allows specifying a list of placement traits to
+be added to the resource provider representing the matching PCI devices.
+
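+For illustration, a ``device_spec`` entry using these tags could look like the
+following sketch, where the address, the resource class name and the traits
+are made-up examples:
+
+.. code-block:: ini
+
+ [pci]
+ report_in_placement = True
+ device_spec = { "address": "0000:41:00.0", "resource_class": "CUSTOM_MY_ACCELERATOR", "traits": "CUSTOM_FAST,CUSTOM_LOW_LATENCY" }
+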
+.. note::
+ In nova 26.0.0 (Zed) the Placement resource tracking of PCI devices does not
+ support SR-IOV devices intended to be consumed via Neutron ports and
+ therefore having ``physical_network`` tag in
+ :oslo.config:option:`pci.device_spec`. Such devices are supported via the
+ legacy PCI tracker code path in Nova.
+
+.. note::
+ Having different resource class or traits configuration for VFs under the
+ same parent PF is not supported and the nova-compute service will refuse to
+ start with such configuration.
+
+.. important::
+ While nova supported configuring both the PF and its children VFs for PCI
+ passthrough in the past, it only allowed consuming either the parent PF or
+ its children VFs. Since 26.0.0. (Zed) the nova-compute service will
+ enforce the same rule for the configuration as well and will refuse to
+ start if both the parent PF and its VFs are configured.
+
+.. important::
+ While nova supported configuring PCI devices by device name via the
+ ``devname`` parameter in :oslo.config:option:`pci.device_spec` in the past,
+ this proved to be problematic as the netdev name of a PCI device could
+ change for multiple reasons during hypervisor reboot. So since nova 26.0.0
+ (Zed) the nova-compute service will refuse to start with such configuration.
+ It is suggested to use the PCI address of the device instead.
+
+The nova-compute service makes sure that existing instances with PCI
+allocations in the nova DB will have a corresponding PCI allocation in
+placement. This allocation healing also acts on any new instances regardless of
+the status of the scheduling part of this feature to make sure that the nova
+DB and placement are in sync. There is one limitation of the healing logic.
+It assumes that there is no in-progress migration when the nova-compute service
+is upgraded. If there is an in-progress migration then the PCI allocation on
+the source host of the migration will not be healed. The placement view will be
+consistent after such migration is completed or reverted.
+
+Reconfiguring the PCI devices on the hypervisor or changing the
+:oslo.config:option:`pci.device_spec` configuration option and restarting the
+nova-compute service is supported in the following cases:
+
+* new devices are added
+* devices without allocation are removed
+
+Removing a device that has allocations is not supported. If a device having
+any allocation is removed, the nova-compute service will keep the device and
+the allocation in the nova DB and in placement and will log a warning. If a
+device with any allocation is reconfigured in a way that an allocated PF is
+removed and VFs from the same PF are configured (or vice versa), then
+nova-compute will refuse to start, as it would create a situation where both
+the PF and its VFs are made available for consumption.
+
+Since nova 27.0.0 (2023.1 Antelope) scheduling and allocation of PCI devices
+in Placement can also be enabled via
+:oslo.config:option:`filter_scheduler.pci_in_placement`. Please note that this
+should only be enabled after all the computes in the system are configured to
+report PCI inventory in Placement by
+enabling :oslo.config:option:`pci.report_in_placement`. In Antelope, flavor
+based PCI requests are supported but Neutron port based PCI requests are not
+handled in Placement.
+
+If you are upgrading from an earlier version with already existing servers with
+PCI usage then you must enable :oslo.config:option:`pci.report_in_placement`
+first on all your computes having PCI allocations and then restart the
+nova-compute service, before you enable
+:oslo.config:option:`filter_scheduler.pci_in_placement`. The compute service
+will heal the missing PCI allocation in placement during startup and will
+continue healing missing allocations for future servers until the scheduling
+support is enabled.
+
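+Once every compute reports its PCI inventory in Placement, the scheduling
+support can be switched on, for example:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ pci_in_placement = True
+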
+If a flavor requests multiple ``type-VF`` devices via
+:nova:extra-spec:`pci_passthrough:alias` then it is important to consider the
+value of :nova:extra-spec:`group_policy` as well. The value ``none``
+allows nova to select VFs from the same parent PF to fulfill the request. The
+value ``isolate`` restricts nova to selecting each VF from a different parent
+PF to fulfill the request. If :nova:extra-spec:`group_policy` is not provided
+in such a flavor then it defaults to ``none``.
+
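+For example, a flavor requesting two VFs through a hypothetical alias named
+``my_vf_alias`` while allowing both to come from the same parent PF could be
+configured as:
+
+.. code-block:: console
+
+ $ openstack flavor set my_flavor \
+ --property "pci_passthrough:alias"="my_vf_alias:2" \
+ --property group_policy=none
+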
+Symmetrically with the ``resource_class`` and ``traits`` fields of
+:oslo.config:option:`pci.device_spec` the :oslo.config:option:`pci.alias`
+configuration option supports requesting devices by Placement resource class
+name via the ``resource_class`` field, and also supports requesting traits to
+be present on the selected devices via the ``traits`` field in the alias. If
+the ``resource_class`` field is not specified in the alias then it is defaulted
+by nova to ``CUSTOM_PCI_<vendor_id>_<product_id>``.
+
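+As a sketch with made-up names, a matching ``pci.alias`` entry requesting such
+a device could look like:
+
+.. code-block:: ini
+
+ [pci]
+ alias = { "name": "my_accelerator", "device_type": "type-PCI", "resource_class": "CUSTOM_MY_ACCELERATOR", "traits": "CUSTOM_FAST" }
+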
+For deeper technical details please read the `nova specification <https://specs.openstack.org/openstack/nova-specs/specs/zed/approved/pci-device-tracking-in-placement.html>`_.
+
+
+Virtual IOMMU support
+---------------------
+
+By providing the :nova:extra-spec:`hw:viommu_model` flavor extra spec or the
+equivalent ``hw_viommu_model`` image metadata property, and provided the guest
+CPU architecture and OS allow it, we can enable vIOMMU in the libvirt driver.
+
+.. note::
+
+ Enabling vIOMMU might introduce significant performance overhead.
+ You can see a performance comparison table in the
+ `AMD vIOMMU session on KVM Forum 2021`_.
+ For the above reason, vIOMMU should only be enabled for workloads that
+ require it.
+
+.. _`AMD vIOMMU session on KVM Forum 2021`: https://static.sched.com/hosted_files/kvmforum2021/da/vIOMMU%20KVM%20Forum%202021%20-%20v4.pdf
+
+Here are four possible values allowed for ``hw:viommu_model``
+(and ``hw_viommu_model``):
+
+**virtio**
+ Supported on Libvirt since 8.3.0, for Q35 and ARM virt guests.
+
+**smmuv3**
+ Supported on Libvirt since 5.5.0, for ARM virt guests.
+
+**intel**
+ Supported for Q35 guests.
+
+**auto**
+ This option will translate to ``virtio`` if Libvirt supports it, else to
+ ``intel`` on X86 (Q35) and ``smmuv3`` on AArch64.
+
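+For example, to request a virtual IOMMU via the flavor:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property hw:viommu_model=auto
+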
+For the viommu attributes:
+
+* The ``intremap``, ``caching_mode``, and ``iotlb``
+ options for the viommu (these are driver attributes defined in
+ `Libvirt IOMMU Domain`_) will be directly enabled.
+
+* ``eim`` will be directly enabled if the machine type is Q35.
+ ``eim`` is a driver attribute defined in `Libvirt IOMMU Domain`_.
+
+.. note::
+
+ The eim (Extended Interrupt Mode) attribute (with possible values on and
+ off) can be used to configure Extended Interrupt Mode.
+ A q35 domain with split I/O APIC (as described in hypervisor features),
+ and both interrupt remapping and EIM turned on for the IOMMU, will be
+ able to use more than 255 vCPUs. Since 3.4.0 (QEMU/KVM only).
+
+* The ``aw_bits`` attribute can be used to set the address width to allow
+ mapping larger iova addresses in the guest. Since the currently supported
+ QEMU values are 39 and 48, we directly set this to the larger width (48)
+ if Libvirt supports it.
+ ``aw_bits`` is a driver attribute defined in `Libvirt IOMMU Domain`_.
+
+.. _`Libvirt IOMMU Domain`: https://libvirt.org/formatdomain.html#iommu-devices
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index 01ef44810c..9b28646d27 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -221,6 +221,9 @@ server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings:
service, this ensures that only approved VNC proxy servers can connect to the
Compute nodes.
+Make sure to set correct permissions on the certificate files for the process
+which creates instances. Please follow the libvirt wiki page [3]_ for details.
+
After editing :file:`qemu.conf`, the ``libvirtd`` service must be restarted:
.. code-block:: shell
@@ -363,6 +366,16 @@ Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
by the outside world. For example, this may be the management interface IP
address of the controller or the VIP.
+Optionally, the :program:`nova-compute` service supports the following
+additional options to configure compression settings (algorithms and modes)
+for SPICE consoles.
+
+- :oslo.config:option:`spice.image_compression`
+- :oslo.config:option:`spice.jpeg_compression`
+- :oslo.config:option:`spice.zlib_compression`
+- :oslo.config:option:`spice.playback_compression`
+- :oslo.config:option:`spice.streaming_mode`
+
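+An illustrative ``nova.conf`` snippet follows; the values shown are examples
+only and should be adjusted to your needs:
+
+.. code-block:: ini
+
+   [spice]
+   image_compression = auto_glz
+   jpeg_compression = auto
+   zlib_compression = auto
+   playback_compression = true
+   streaming_mode = filter
+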
Serial
------
@@ -610,5 +623,6 @@ Frequently Asked Questions
References
----------
-.. [1] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
+.. [1] https://qemu.weilnetz.de/doc/4.2/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
.. [2] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
+.. [3] https://wiki.libvirt.org/page/VNCTLSSetup#Changes_to_be_made_on_the_virtualisation_host_server \ No newline at end of file
diff --git a/doc/source/admin/scheduling.rst b/doc/source/admin/scheduling.rst
index e0e5b7188b..9071c92ac9 100644
--- a/doc/source/admin/scheduling.rst
+++ b/doc/source/admin/scheduling.rst
@@ -1100,6 +1100,16 @@ control the initial allocation ratio values for a compute node:
* :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB
inventory allocation ratio for a new compute node record, defaults to 1.0
+Starting with the 27.0.0 Antelope release, the following defaults are used for
+the initial allocation ratio values for a compute node:
+
+* :oslo.config:option:`initial_cpu_allocation_ratio` the initial VCPU
+ inventory allocation ratio for a new compute node record, defaults to 4.0
+* :oslo.config:option:`initial_ram_allocation_ratio` the initial MEMORY_MB
+ inventory allocation ratio for a new compute node record, defaults to 1.0
+* :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB
+ inventory allocation ratio for a new compute node record, defaults to 1.0
+
Scheduling considerations
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1162,7 +1172,7 @@ here.
:oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
:oslo.config:option:`DEFAULT.ram_allocation_ratio` or
:oslo.config:option:`DEFAULT.disk_allocation_ratio` to a non-null value
- would ensure the user-configured value was always overriden.
+ would ensure the user-configured value was always overridden.
.. _osc-placement: https://docs.openstack.org/osc-placement/latest/index.html
diff --git a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
index 0e6206d0b1..61a4e840cb 100644
--- a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
+++ b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
@@ -186,8 +186,8 @@ Related information
any other ``*_tls*`` parameters, _unless_ you need different
certificates for some services. The rationale for that is that some
services (e.g. migration / NBD) are only exposed to internal
- infrastructure; while some sevices (VNC, Spice) might be exposed
- publically, so might need different certificates. For OpenStack this
+ infrastructure; while some services (VNC, Spice) might be exposed
+ publicly, so might need different certificates. For OpenStack this
does not matter, though, we will stick with the defaults.
- If they are not already open, ensure you open up these TCP ports on
diff --git a/doc/source/admin/soft-delete-shadow-tables.rst b/doc/source/admin/soft-delete-shadow-tables.rst
new file mode 100644
index 0000000000..126279c4d0
--- /dev/null
+++ b/doc/source/admin/soft-delete-shadow-tables.rst
@@ -0,0 +1,62 @@
+=============================
+Soft Delete and Shadow Tables
+=============================
+
+Nova has two unrelated features which are called ``soft delete``:
+
+Soft delete instances that can be restored
+------------------------------------------
+
+After an instance delete request, the actual delete is
+delayed by a configurable amount of time (config option
+:oslo.config:option:`reclaim_instance_interval`). During the delay,
+the instance is marked to be in state ``SOFT_DELETED`` and can be
+restored (:command:`openstack server restore`) by an admin in order to
+gracefully handle human mistakes. If the instance is not restored during
+the configured delay, a periodic job actually deletes the instance.
+
+This feature is optional and by default off.
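+
+A minimal sketch of enabling this behavior (the interval value is illustrative
+only) and restoring a soft deleted server:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # keep soft deleted instances around for one hour before reclaiming them
+   reclaim_instance_interval = 3600
+
+.. code-block:: console
+
+   $ openstack server delete my-server
+   $ openstack server restore my-server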
+
+See also:
+
+- "Delete, Restore" in `API Guide: Server Concepts
+ <https://docs.openstack.org/api-guide/compute/server_concepts.html#server-actions>`_
+- config reference: :oslo.config:option:`reclaim_instance_interval`
+
+Soft delete database rows to shadow tables
+------------------------------------------
+
+At an actual instance delete, no DB record is deleted. Instead the
+records are marked as deleted (for example ``instances.deleted``
+in Nova cell databases). This preserves historic information
+for debugging and audit uses. But it also leads to accumulation
+of data in Nova cell DB tables, which may have an effect on
+Nova DB performance as documented in `DB prune deleted rows
+<https://docs.openstack.org/nova/latest/admin/upgrades.html#concepts>`_.
+
+The records marked as deleted can be cleaned up in multiple stages.
+First you can move them to so-called shadow tables (tables with prefix
+``shadow_`` in Nova cell databases). This is called *archiving the
+deleted rows*. Nova does not query shadow tables, therefore data moved
+to the shadow tables no longer affects DB performance. However, storage
+space is still consumed. Then you can actually delete the information
+from the shadow tables. This is called *DB purge*.
+
+These operations can be performed by nova-manage:
+
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-archive-deleted-rows
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-purge
+
+This feature is not optional. Every long-running deployment should
+regularly archive and purge the deleted rows, for example via a cron
+job that regularly calls :program:`nova-manage db archive_deleted_rows` and
+:program:`nova-manage db purge`. The tradeoffs between data retention,
+DB performance and storage needs should be considered.
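+
+A minimal sketch of such a cron job (the schedule and retention window are
+illustrative only):
+
+.. code-block:: console
+
+   # archive rows deleted more than 30 days ago, every day at 03:00,
+   # then purge the archived rows older than the same window at 03:30
+   0 3 * * * nova-manage db archive_deleted_rows --before "$(date -d 'now - 30 days' +%F)" --until-complete --all-cells
+   30 3 * * * nova-manage db purge --before "$(date -d 'now - 30 days' +%F)" --all-cells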
+
+In the Mitaka release there was an agreement between Nova developers that
+it's not desirable to provide shadow tables for every table in the Nova
+database, `documented in a spec
+<https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/no-more-soft-delete.html>`_.
+
+Therefore not all information about an instance is preserved in the shadow
+tables. Since then, no new shadow tables have been introduced.
diff --git a/doc/source/admin/vdpa.rst b/doc/source/admin/vdpa.rst
new file mode 100644
index 0000000000..5d0408b0b3
--- /dev/null
+++ b/doc/source/admin/vdpa.rst
@@ -0,0 +1,86 @@
+============================
+Using ports vnic_type='vdpa'
+============================
+.. versionadded:: 23.0.0 (Wallaby)
+
+ Introduced support for vDPA.
+
+.. versionadded:: 26.0.0 (Zed)
+
+ Added support for all instance move operations,
+ and the interface attach/detach, and suspend/resume operations.
+
+.. important::
+ The functionality described below is only supported by the
+ libvirt/KVM virt driver.
+
+The kernel vDPA (virtio Data Path Acceleration) framework
+provides a vendor independent framework for offloading data-plane
+processing to software or hardware virtio device backends.
+While the kernel vDPA framework supports many types of vDPA devices,
+at this time nova only supports ``virtio-net`` devices
+using the ``vhost-vdpa`` front-end driver. Support for ``virtio-blk`` or
+``virtio-gpu`` may be added in the future but is not currently planned
+for any specific release.
+
+vDPA device tracking
+~~~~~~~~~~~~~~~~~~~~
+When implementing support for vDPA based neutron ports, one of the first
+decisions nova had to make was how to model the availability of vDPA devices
+and the capability to virtualize vDPA devices. As the initial use-case
+for this technology was to offload networking to hardware offloaded OVS via
+neutron ports, the decision was made to extend the existing PCI tracker that
+is used for SR-IOV and pci-passthrough to support vDPA devices. As a result,
+a simplification was made to assume that the parent device of a vDPA device
+is an SR-IOV Virtual Function (VF). Consequently, software-only vDPA devices
+such as those created by the kernel ``vdpa-sim`` sample module are not
+supported.
+
+To make vDPA devices available to be scheduled to guests, the operator should
+include the device using the PCI address or vendor ID and product ID of the
+parent VF in the PCI ``device_spec``.
+See: :nova-doc:`pci-passthrough <admin/pci-passthrough>` for details.
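+
+A minimal sketch of such a ``device_spec`` entry; the vendor ID, product ID
+and physical network shown are illustrative only and must match the parent
+VFs and networking on your hosts:
+
+.. code-block:: ini
+
+   [pci]
+   device_spec = { "vendor_id": "15b3", "product_id": "101e", "physical_network": "physnet0" }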
+
+Nova will not create the VFs or vDPA devices automatically. It is expected
+that the operator will allocate them before starting the nova-compute agent.
+While no specific mechanism is prescribed to do this, udev rules or systemd
+service files are generally the recommended approach to ensure the devices
+are created consistently across reboots.
+
+.. note::
+ As vDPA is an offload only for the data plane and not the control plane, a
+ vDPA control plane is required to properly support vDPA device passthrough.
+ At the time of writing only hardware offloaded OVS is supported when using
+ vDPA with nova. Because of this, vDPA devices cannot be requested using the
+ PCI alias. While nova could allow vDPA devices to be requested by the
+ flavor using a PCI alias, we would not be able to correctly configure the
+ device as there would be no suitable control plane. For this reason vDPA
+ devices are currently only consumable via neutron ports.
+
+Virt driver support
+~~~~~~~~~~~~~~~~~~~
+
+Supporting neutron ports with ``vnic_type=vdpa`` depends on the capability
+of the virt driver. At this time only the ``libvirt`` virt driver with KVM
+is fully supported. QEMU may also work but is untested.
+
+vDPA support depends on kernel 5.7+, Libvirt 6.9.0+ and QEMU 5.1+.
+
+vDPA lifecycle operations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To boot a VM with vDPA ports, they must first be created in neutron.
+To do this the normal SR-IOV workflow is used, whereby the port is first created
+in neutron and passed into nova as part of the server create request.
+
+.. code-block:: bash
+
+ openstack port create --network <my network> --vnic-type vdpa vdpa-port
+ openstack server create --flavor <my-flavor> --image <my-image> --port <vdpa-port uuid> vdpa-vm
+
+vDPA live migration
+~~~~~~~~~~~~~~~~~~~
+
+At this time QEMU and the ``vhost-vdpa`` kernel module do not support transparent
+live migration of VMs with vDPA ports. To enable live migration of VMs with
+vDPA interfaces, the existing SR-IOV hotplug live migration procedure has been
+extended to include ``vnic_type='vdpa'`` interfaces.
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f190949efa..1346dab92e 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -41,6 +41,8 @@ Files
* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
+* ``/etc/nova/compute_id``
+* ``/var/lib/nova/compute_id``
See Also
========
diff --git a/doc/source/cli/nova-manage.rst b/doc/source/cli/nova-manage.rst
index 8aacf965b4..53152a0a6f 100644
--- a/doc/source/cli/nova-manage.rst
+++ b/doc/source/cli/nova-manage.rst
@@ -258,17 +258,17 @@ stopping at 0, or use the :option:`--until-complete` option.
``YYYY-MM-DD[HH:mm:ss]``. For example::
# Purge shadow table rows older than a specific date
- nova-manage db archive --before 2015-10-21
+ nova-manage db archive_deleted_rows --before 2015-10-21
# or
- nova-manage db archive --before "Oct 21 2015"
+ nova-manage db archive_deleted_rows --before "Oct 21 2015"
# Times are also accepted
- nova-manage db archive --before "2015-10-21 12:00"
+ nova-manage db archive_deleted_rows --before "2015-10-21 12:00"
Note that relative dates (such as ``yesterday``) are not supported
natively. The ``date`` command can be helpful here::
# Archive deleted rows more than one month old
- nova-manage db archive --before "$(date -d 'now - 1 month')"
+ nova-manage db archive_deleted_rows --before "$(date -d 'now - 1 month')"
.. option:: --verbose
@@ -1607,7 +1607,7 @@ instance changing when moving between machine types.
.. option:: --force
- Skip machine type compatability checks and force machine type update.
+ Skip machine type compatibility checks and force machine type update.
.. rubric:: Return codes
diff --git a/doc/source/cli/nova-rootwrap.rst b/doc/source/cli/nova-rootwrap.rst
index 4fcae829fc..47e3268b97 100644
--- a/doc/source/cli/nova-rootwrap.rst
+++ b/doc/source/cli/nova-rootwrap.rst
@@ -9,7 +9,7 @@ Synopsis
::
- nova-rootwrap CONFIG_FILE COMMMAND
+ nova-rootwrap CONFIG_FILE COMMAND
Description
===========
diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst
index a198159e17..5fbb23f388 100644
--- a/doc/source/cli/nova-status.rst
+++ b/doc/source/cli/nova-status.rst
@@ -137,7 +137,7 @@ Upgrade
* Checks for the Placement API are modified to require version 1.35.
* Checks for the policy files are not automatically overwritten with
- new defaults.
+ new defaults. This check was dropped in the 26.0.0 (Zed) release.
**22.0.0 (Victoria)**
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
index f7f40790f1..8d59d2dc27 100644
--- a/doc/source/configuration/index.rst
+++ b/doc/source/configuration/index.rst
@@ -48,7 +48,7 @@ services and what configuration options are available can be found below.
.. # NOTE(mriedem): This is the section where we hide things that we don't
# actually want in the table of contents but sphinx build would fail if
# they aren't in the toctree somewhere.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -100,7 +100,7 @@ policies are available can be found below.
.. # NOTE(mriedem): This is the section where we hide things that we don't
# actually want in the table of contents but sphinx build would fail if
# they aren't in the toctree somewhere.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/configuration/policy-concepts.rst b/doc/source/configuration/policy-concepts.rst
index b2df1e4c1b..383b27247f 100644
--- a/doc/source/configuration/policy-concepts.rst
+++ b/doc/source/configuration/policy-concepts.rst
@@ -65,36 +65,13 @@ represent the layer of authorization required to access an API.
.. note::
- The ``scope_type`` of each policy is hardcoded and is not
- overridable via the policy file.
+ The ``scope_type`` of each policy is hardcoded to ``project`` scope
+ and is not overridable via the policy file.
Nova policies have implemented the scope concept by defining the ``scope_type``
-in policies. To know each policy's ``scope_type``, please refer to the
-:doc:`Policy Reference </configuration/policy>` and look for ``Scope Types`` or
-``Intended scope(s)`` in :doc:`Policy Sample File </configuration/sample-policy>`
-as shown in below examples.
-
-.. rubric:: ``system`` scope
-
-Policies with a ``scope_type`` of ``system`` means a user with a
-``system-scoped`` token has permission to access the resource. This can be
-seen as a global role. All the system-level operation's policies
-have defaulted to ``scope_type`` of ``['system']``.
-
-For example, consider the ``GET /os-hypervisors`` API.
-
-.. code::
-
- # List all hypervisors.
- # GET /os-hypervisors
- # Intended scope(s): system
- #"os_compute_api:os-hypervisors:list": "rule:system_reader_api"
-
-.. rubric:: ``project`` scope
-
-Policies with a ``scope_type`` of ``project`` means a user with a
-``project-scoped`` token has permission to access the resource. Project-level
-only operation's policies are defaulted to ``scope_type`` of ``['project']``.
+for all the policies to ``project`` scope. This means that if a user tries to
+access nova APIs with a ``system`` scoped token they will get a 403 permission
+denied error.
For example, consider the ``POST /os-server-groups`` API.
@@ -105,28 +82,6 @@ For example, consider the ``POST /os-server-groups`` API.
# Intended scope(s): project
#"os_compute_api:os-server-groups:create": "rule:project_member_api"
-.. rubric:: ``system and project`` scope
-
-Policies with a ``scope_type`` of ``system and project`` means a user with a
-``system-scoped`` or ``project-scoped`` token has permission to access the
-resource. All the system and project level operation's policies have defaulted
-to ``scope_type`` of ``['system', 'project']``.
-
-For example, consider the ``GET /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key}``
-API.
-
-.. code::
-
- # Show an extra spec for a flavor
- # GET /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key}
- # Intended scope(s): system, project
- #"os_compute_api:os-flavor-extra-specs:show": "rule:project_reader_or_admin"
-
-These scope types provide a way to differentiate between system-level and
-project-level access roles. You can control the information with scope of the
-users. This means you can control that none of the project level role can get
-the hypervisor information.
-
Policy scope is disabled by default to allow operators to migrate from
the old policy enforcement system in a graceful way. This can be
enabled by configuring the :oslo.config:option:`oslo_policy.enforce_scope`
@@ -149,62 +104,139 @@ defaults for each policy.
.. rubric:: ``reader``
-This provides read-only access to the resources within the ``system`` or
-``project``. Nova policies are defaulted to below rules:
-
-.. code::
-
- system_reader_api
- Default
- role:reader and system_scope:all
-
- system_or_project_reader
- Default
- (rule:system_reader_api) or (role:reader and project_id:%(project_id)s)
+This provides read-only access to the resources. Nova policies are defaulted
+to the below rules:
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="project_reader",
+ check_str="role:reader and project_id:%(project_id)s",
+ description="Default rule for Project level read only APIs."
+ )
+
+Using it in a policy rule (with admin + reader access; because we want to keep
+the legacy admin behavior the same, we also need to give the admin role access
+to the reader APIs):
+
+.. code-block:: python
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:show',
+ check_str='role:admin or (role:reader and project_id:%(project_id)s)',
+ description="Show a server",
+ operations=[
+ {
+ 'method': 'GET',
+ 'path': '/servers/{server_id}'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+OR
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="admin_api",
+ check_str="role:admin",
+ description="Default rule for administrative APIs."
+ )
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:show',
+ check_str='rule:admin_api or rule:project_reader',
+ description='Show a server',
+ operations=[
+ {
+ 'method': 'GET',
+ 'path': '/servers/{server_id}'
+ }
+ ],
+ scope_types=['project'],
+ )
.. rubric:: ``member``
-This role is to perform the project level write operation with combination
-to the system admin. Nova policies are defaulted to below rules:
-
-.. code::
-
- project_member_api
- Default
- role:member and project_id:%(project_id)s
-
- system_admin_or_owner
- Default
- (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+A project-member is someone with the member role on a project. It is
+intended to be used by end users who consume resources within a project,
+which requires higher permission than the reader role but less than the
+admin role. It inherits all the permissions of a project-reader.
+
+The project-member persona in the policy check string:
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="project_member",
+ check_str="role:member and project_id:%(project_id)s",
+ description="Default rule for Project level non admin APIs."
+ )
+
+Using it in a policy rule (with admin + member access; because we want to keep
+the legacy admin behavior, the admin role gets access to the project level
+member APIs):
+
+.. code-block:: python
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:create',
+ check_str='role:admin or (role:member and project_id:%(project_id)s)',
+ description='Create a server',
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+OR
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="admin_api",
+ check_str="role:admin",
+ description="Default rule for administrative APIs."
+ )
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:create',
+ check_str='rule:admin_api or rule:project_member',
+ description='Create a server',
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+The ``project_id:%(project_id)s`` part of the check_str is important as it
+restricts access to within the requested project.
.. rubric:: ``admin``
-This role is to perform the admin level write operation at system as well
-as at project-level operations. Nova policies are defaulted to below rules:
-
-.. code::
-
- system_admin_api
- Default
- role:admin and system_scope:all
+This role is to perform the admin level write operations. Nova policies are
+defaulted to the below rules:
- project_admin_api
- Default
- role:admin and project_id:%(project_id)s
+.. code-block:: python
- system_admin_or_owner
- Default
- (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:os-hypervisors:list',
+ check_str='role:admin',
+ description='List all hypervisors',
+ operations=[{'method': 'GET', 'path': '/os-hypervisors'}],
+ scope_types=['project']
+ )
With these new defaults, you can solve the problem of:
#. Providing the read-only access to the user. Polices are made more granular
- and defaulted to reader rules. For exmaple: If you need to let someone audit
+ and defaulted to reader rules. For example: If you need to let someone audit
your deployment for security purposes.
#. Customize the policy in better way. For example, you will be able
- to provide access to project level user to perform live migration for their
- server or any other project with their token.
+ to provide access to a project level user to perform operations within
+ their own project only.
Nova supported scope & Roles
-----------------------------
@@ -212,40 +244,21 @@ Nova supported scope & Roles
Nova supports the below combination of scopes and roles where roles can be
overridden in the policy.yaml file but scope is not override-able.
-#. ADMIN: ``admin`` role on ``system`` scope. This is System Administrator to
- perform the system level resource operations. Example: enable/disable compute
- services.
-
-#. PROJECT_ADMIN: ``admin`` role on ``project`` scope. This is used to perform
- admin level operation within project. For example: Live migrate server.
-
- .. note::
-
- PROJECT_ADMIN has the limitation for the below policies
-
- * ``os_compute_api:servers:create:forced_host``
- * ``os_compute_api:servers:compute:servers:create:requested_destination``
-
- To create a server on specific host via force host or requested
- destination, you need to pass the hostname in ``POST /servers``
- API request but there is no way for PROJECT_ADMIN to get the hostname
- via API. This limitation will be addressed in a future release.
-
+#. ADMIN: ``admin`` role on ``project`` scope. This is an administrator who
+ can perform the admin level operations. Example: enable/disable a compute
+ service, live migrate a server, etc.
#. PROJECT_MEMBER: ``member`` role on ``project`` scope. This is used to perform
resource owner level operation within project. For example: Pause a server.
-
#. PROJECT_READER: ``reader`` role on ``project`` scope. This is used to perform
read-only operation within project. For example: Get server.
+#. PROJECT_MEMBER_OR_ADMIN: ``admin`` or ``member`` role on ``project`` scope.
+ Such policy rules are defaulted to most of the owner level APIs, so that
+ along with the ``member`` role, the legacy admin can continue to access
+ those APIs.
-#. PROJECT_READER_OR_ADMIN: ``admin`` role on ``system`` scope
- or ``reader`` role on ``project`` scope. Such policy rules are scoped
- as both ``system`` as well as ``project``. Example: to allow system
- admin and project reader to list flavor extra specs.
-
- .. note:: As of now, only ``system`` and ``project`` scopes are supported in Nova.
+#. PROJECT_READER_OR_ADMIN: ``admin`` or ``reader`` role on ``project`` scope.
+ Such policy rules are defaulted to most of the read only APIs so that the
+ legacy admin can continue to access those APIs.
Backward Compatibility
----------------------
@@ -253,10 +266,10 @@ Backward Compatibility
Backward compatibility with versions prior to 21.0.0 (Ussuri) is maintained by
supporting the old defaults and disabling the ``scope_type`` feature by default.
This means the old defaults and deployments that use them will keep working
-as-is. However, we encourage every deployment to switch to new policy.
-Scope checks are disabled by default and will be enabled by default starting
-Nova 26.0.0 (OpenStack Zed release) and the old defaults will be removed
-starting in the Nova 27.0.0 release.
+as-is. However, we encourage every deployment to switch to the new policy. The
+new defaults will be enabled by default in the OpenStack 2023.1 (Nova 27.0.0)
+release and the old defaults will be removed starting in the OpenStack 2023.2
+(Nova 28.0.0) release.
To implement the new default reader roles, some policies needed to become
granular. They have been renamed, with the old names still supported for
@@ -275,7 +288,6 @@ Here is step wise guide for migration:
You need to create the new token with scope knowledge via below CLI:
- - :keystone-doc:`Create System Scoped Token </admin/tokens-overview.html#operation_create_system_token>`.
- :keystone-doc:`Create Project Scoped Token </admin/tokens-overview.html#operation_create_project_scoped_token>`.
#. Create new default roles in keystone if not done:
@@ -295,10 +307,6 @@ Here is step wise guide for migration:
(assuming the rest of the policy passes). The default value of this flag
is False.
- .. note:: Before you enable this flag, you need to audit your users and make
- sure everyone who needs system-level access has a system role
- assignment in keystone.
-
#. Enable new defaults
The :oslo.config:option:`oslo_policy.enforce_new_defaults` flag switches
@@ -311,7 +319,6 @@ Here is step wise guide for migration:
.. note:: Before you enable this flag, you need to educate users about the
different roles they need to use to continue using Nova APIs.
-
#. Check for deprecated policies
A few policies were made more granular to implement the reader roles. New
@@ -319,28 +326,31 @@ Here is step wise guide for migration:
are overwritten in policy file, then warning will be logged. Please migrate
those policies to new policy names.
+.. note::
+
+ We recommend enabling both scope and the new defaults together,
+ otherwise you may experience some late failures with unclear error
+ messages. For example, if you enable the new defaults and disable the
+ scope check then system users will be allowed to access the APIs but
+ will fail later due to the project check, which can be difficult to
+ debug.
+
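+A minimal sketch of the relevant configuration once you are ready to switch
+(shown for illustration; set these only after completing the steps above):
+
+.. code-block:: ini
+
+   [oslo_policy]
+   enforce_new_defaults = True
+   enforce_scope = True
+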
Below table show how legacy rules are mapped to new rules:
-+--------------------+----------------------------------+-----------------+-------------------+
-| Legacy Rules | New Rules | | |
-+====================+==================================+=================+===================+
-| | | *Roles* | *Scope* |
-| +----------------------------------+-----------------+-------------------+
-| | ADMIN | admin | system |
-| Project Admin +----------------------------------+-----------------+ |
-| Role | PROJECT_ADMIN | admin | project |
-| | | | |
-+--------------------+----------------------------------+-----------------+-------------------+
-| | PROJECT_ADMIN | admin | project |
-| +----------------------------------+-----------------+ |
-| | PROJECT_MEMBER | member | |
-| +----------------------------------+-----------------+ |
-| Project admin or | PROJECT_READER | reader | |
-| owner role +----------------------------------+-----------------+-------------------+
-| | PROJECT_READER_OR_ADMIN | admin on system | system |
-| | | or reader on | OR |
-| | | project | project |
-+--------------------+----------------------------------+-----------------+-------------------+
-
-We expect all deployments to migrate to new policy by 27.0.0 release so that
-we can remove the support of old policies.
++--------------------+---------------------------+----------------+-----------+
+| Legacy Rule | New Rules |Operation |scope_type |
++====================+===========================+================+===========+
+| RULE_ADMIN_API |-> ADMIN |Global resource | [project] |
+| | |Write & Read | |
++--------------------+---------------------------+----------------+-----------+
+| |-> ADMIN |Project admin | [project] |
+| | |level operation | |
+| +---------------------------+----------------+-----------+
+| RULE_ADMIN_OR_OWNER|-> PROJECT_MEMBER_OR_ADMIN |Project resource| [project] |
+| | |Write | |
+| +---------------------------+----------------+-----------+
+| |-> PROJECT_READER_OR_ADMIN |Project resource| [project] |
+| | |Read | |
++--------------------+---------------------------+----------------+-----------+
+
+We expect all deployments to migrate to the new policy by the OpenStack 2023.1
+(Nova 27.0.0) release so that we can remove the support for the old policies.
diff --git a/doc/source/contributor/api-ref-guideline.rst b/doc/source/contributor/api-ref-guideline.rst
index cc5eab2538..0aec4eeeb4 100644
--- a/doc/source/contributor/api-ref-guideline.rst
+++ b/doc/source/contributor/api-ref-guideline.rst
@@ -351,7 +351,7 @@ In the parameter file, define the ``required`` field in each parameter.
but does not appear when non-admin users call.
If a parameter must be specified in the request or always appears
-in the response in the micoversion added or later,
+in the response in the microversion added or later,
the parameter must be defined as required (``true``).
Microversion
diff --git a/doc/source/contributor/development-environment.rst b/doc/source/contributor/development-environment.rst
index 32b8f8334e..3e19ef1ca2 100644
--- a/doc/source/contributor/development-environment.rst
+++ b/doc/source/contributor/development-environment.rst
@@ -197,7 +197,7 @@ Using fake computes for tests
The number of instances supported by fake computes is not limited by physical
constraints. It allows you to perform stress tests on a deployment with few
resources (typically a laptop). Take care to avoid using scheduler filters
-that will limit the number of instances per compute, such as ``AggregateCoreFilter``.
+that will limit the number of instances per compute, such as ``NumInstancesFilter``.
Fake computes can also be used in multi hypervisor-type deployments in order to
take advantage of fake and "real" computes during tests:
diff --git a/doc/source/contributor/how-to-get-involved.rst b/doc/source/contributor/how-to-get-involved.rst
index fd90138354..28e75564b0 100644
--- a/doc/source/contributor/how-to-get-involved.rst
+++ b/doc/source/contributor/how-to-get-involved.rst
@@ -261,7 +261,7 @@ reviews:
- Where do I start? What should I review?
- There are various tools, but a good place to start is:
- https://etherpad.openstack.org/p/nova-runways-zed
+ https://review.opendev.org/q/project:openstack/nova+status:open+label:Review-Priority%253DANY
- Depending on the time in the cycle, it's worth looking at
NeedsCodeReview blueprints:
https://blueprints.launchpad.net/nova/
@@ -323,7 +323,7 @@ becoming a member of nova-core.
How to do great nova-spec reviews?
==================================
-https://specs.openstack.org/openstack/nova-specs/specs/zed/template.html
+https://specs.openstack.org/openstack/nova-specs/specs/2023.1/template.html
:doc:`/contributor/blueprints`.
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 472e083313..5b6e0b8f92 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -22,7 +22,7 @@ Getting Started
* :doc:`/contributor/development-environment`: Get your computer setup to
contribute
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -60,7 +60,7 @@ while keeping users happy and keeping developers productive.
* :doc:`/contributor/ptl-guide`: A chronological PTL reference guide
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -86,7 +86,7 @@ Reviewing
* :doc:`/contributor/documentation`: Guidelines for handling documentation
contributions
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -120,7 +120,7 @@ be Python code. All new code needs to be validated somehow.
* :doc:`/contributor/testing/eventlet-profiling`
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -152,7 +152,7 @@ extend.
* :doc:`/contributor/notifications`: How to add your own notifications
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -176,7 +176,7 @@ diving in.
* :doc:`/contributor/resize-and-cold-migrate`: Describes the differences and
similarities between resize and cold migrate operations.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/contributor/process.rst b/doc/source/contributor/process.rst
index 1cbb9a0c72..f1be1c1b4a 100644
--- a/doc/source/contributor/process.rst
+++ b/doc/source/contributor/process.rst
@@ -36,8 +36,8 @@ If you are new to Nova, please read this first: :ref:`getting_involved`.
Dates overview
==============
-For Zed, please see:
-https://wiki.openstack.org/wiki/Nova/Zed_Release_Schedule
+For 2023.1 Antelope, please see:
+https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule
.. note:: Throughout this document any link which references the name of a
release cycle in the link can usually be changed to the name of the
@@ -102,9 +102,9 @@ Why we have a Spec Freeze:
By the freeze date, we expect all blueprints that will be approved for the
cycle to be listed on launchpad and all relevant specs to be merged.
-For Zed, blueprints can be found at
-https://blueprints.launchpad.net/nova/zed and specs at
-https://specs.openstack.org/openstack/nova-specs/specs/zed/index.html
+For 2023.1 Antelope, blueprints can be found at
+https://blueprints.launchpad.net/nova/antelope and specs at
+https://specs.openstack.org/openstack/nova-specs/specs/2023.1/index.html
Starting with Liberty, we are keeping a backlog open for submission at all
times.
diff --git a/doc/source/contributor/ptl-guide.rst b/doc/source/contributor/ptl-guide.rst
index daf7142f10..813f1bc83e 100644
--- a/doc/source/contributor/ptl-guide.rst
+++ b/doc/source/contributor/ptl-guide.rst
@@ -257,10 +257,6 @@ Immediately after RC
* https://wiki.openstack.org/wiki/Nova/ReleaseChecklist
- * Add database migration placeholders
-
- * Example: https://review.opendev.org/650964
-
* Drop old RPC compat code (if there was a RPC major version bump)
* Example: https://review.opendev.org/543580
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 718aa0eca5..8cd5ae9ceb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -49,7 +49,7 @@ For End Users
As an end user of nova, you'll use nova to create and manage servers with
either tools or the API directly.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -110,7 +110,7 @@ Architecture Overview
* :doc:`Nova architecture </admin/architecture>`: An overview of how all the parts in
nova fit together.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -128,7 +128,7 @@ having installed :keystone-doc:`keystone <install/>`, :glance-doc:`glance
:placement-doc:`placement <install/>`. Ensure that you follow their install
guides first.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:maxdepth: 2
@@ -192,7 +192,7 @@ Once you are running nova, the following information is extremely useful.
instances (either via metadata server or config drive) for your specific
purposes.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -211,7 +211,7 @@ Reference Material
* :doc:`Configuration Guide <configuration/index>`: Information on configuring
the system, including role-based access control policy rules.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -230,7 +230,7 @@ For Contributors
both current and future looking parts of our architecture.
These are collected here.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/install/overview.rst b/doc/source/install/overview.rst
index 9781973d9f..c8e72e4256 100644
--- a/doc/source/install/overview.rst
+++ b/doc/source/install/overview.rst
@@ -67,7 +67,7 @@ follows:
For more information on production architectures, see the `Architecture Design
Guide <https://docs.openstack.org/arch-design/>`_, `OpenStack Operations Guide
-<https://wiki.openstack.org/wiki/OpsGuide>`_, and `OpenStack Networking Guide
+<https://docs.openstack.org/operations-guide/>`_, and `OpenStack Networking Guide
<https://docs.openstack.org/ocata/networking-guide/>`_.
.. _figure-hwreqs:
diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst
index 99936c1d9c..c4e0383af4 100644
--- a/doc/source/install/verify.rst
+++ b/doc/source/install/verify.rst
@@ -119,10 +119,6 @@ Verify operation of the Compute service.
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
- | Check: Policy Scope-based Defaults |
- | Result: Success |
- | Details: None |
- +--------------------------------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success |
| Details: None |
diff --git a/doc/source/reference/attach-volume.rst b/doc/source/reference/attach-volume.rst
index a38a32e533..c82c035a14 100644
--- a/doc/source/reference/attach-volume.rst
+++ b/doc/source/reference/attach-volume.rst
@@ -22,7 +22,7 @@ the optional interactions with the ``os-brick`` library on the compute hosts
during the request.
.. note:: ``os-brick`` is not always used to connect volumes to the host, most
- notibly when connecting an instance natively to ceph ``rbd`` volumes
+ notably when connecting an instance natively to ceph ``rbd`` volumes
The diagram also outlines the various locks taken on the compute during the
attach volume flow. In this example these include locks against the
diff --git a/doc/source/reference/block-device-structs.rst b/doc/source/reference/block-device-structs.rst
index 1b8636c537..8c2508539f 100644
--- a/doc/source/reference/block-device-structs.rst
+++ b/doc/source/reference/block-device-structs.rst
@@ -71,6 +71,8 @@ called ``block_device_info``, and is generated by
``root_device_name``
Hypervisor's notion of the root device's name
+``image``
+ An image-backed disk, if used
``ephemerals``
A list of all ephemeral disks
``block_device_mapping``
@@ -105,13 +107,6 @@ persist data to the BDM object in the DB.
In other contexts this filtering will not have happened, and
``block_device_mapping`` will contain all volumes.
-.. note::
-
- Unlike BDMs, ``block_device_info`` does not currently represent all
- disks that an instance might have. Significantly, it will not contain any
- representation of an image-backed local disk, i.e. the root disk of a
- typical instance which isn't boot-from-volume. Other representations used
- by the libvirt driver explicitly reconstruct this missing disk.
libvirt driver specific BDM data structures
===========================================
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst
index e48a4acc2e..a337699aca 100644
--- a/doc/source/reference/glossary.rst
+++ b/doc/source/reference/glossary.rst
@@ -26,7 +26,7 @@ Glossary
Cell
A cell is a shard or horizontal partition in a nova deployment.
A cell mostly consists of a database, queue, and set of compute nodes.
- All deployments willl have at least one cell (and one "fake" cell).
+ All deployments will have at least one cell (and one "fake" cell).
Larger deployments can have many.
For more information, refer to :doc:`/admin/cells`.
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index aadfdb090a..cb376ad53a 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -44,7 +44,7 @@ The following is a dive into some of the internals in nova.
* :doc:`/reference/libvirt-distro-support-matrix`: Libvirt virt driver OS
distribution support matrix
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -73,7 +73,7 @@ Debugging
* :doc:`/reference/gmr`: Inspired by Amiga, a way to trigger a very
comprehensive dump of a running service for deep debugging.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -97,7 +97,7 @@ these are a great place to start reading up on the current plans.
* :doc:`/reference/scheduler-evolution`: Motivation behind the scheduler /
placement evolution
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -112,7 +112,7 @@ Additional Information
* :doc:`/reference/glossary`: A quick reference guide to some of the terms you
might encounter working on or using nova.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/reference/libvirt-distro-support-matrix.rst b/doc/source/reference/libvirt-distro-support-matrix.rst
index e1a31cd6cc..fd22fc5ba3 100644
--- a/doc/source/reference/libvirt-distro-support-matrix.rst
+++ b/doc/source/reference/libvirt-distro-support-matrix.rst
@@ -180,7 +180,7 @@ OS distribution versions
------------------------
This table provides information on a representative sample of OS distros and
-the version of libirt/QEMU/libguestfs that they ship. This is **NOT** intended
+the version of libvirt/QEMU/libguestfs that they ship. This is **NOT** intended
to be an exhaustive list of distros where OpenStack Nova can run - it is
intended to run on any Linux distro that can satisfy the minimum required
software versions. This table merely aims to help identify when minimum
diff --git a/doc/source/reference/stable-api.rst b/doc/source/reference/stable-api.rst
index 462e8e3feb..3a491150a5 100644
--- a/doc/source/reference/stable-api.rst
+++ b/doc/source/reference/stable-api.rst
@@ -29,7 +29,7 @@ Background
Nova used to include two distinct frameworks for exposing REST API
functionality. Older code is called the "v2 API" and existed in the
/nova/api/openstack/compute/legacy_v2/ directory. This code tree was totally
-removed during Netwon release time frame (14.0.0 and later).
+removed during Newton release time frame (14.0.0 and later).
Newer code is called the "v2.1 API" and exists in the
/nova/api/openstack/compute directory.
diff --git a/doc/source/user/block-device-mapping.rst b/doc/source/user/block-device-mapping.rst
index b43f01de8b..361f4bcf01 100644
--- a/doc/source/user/block-device-mapping.rst
+++ b/doc/source/user/block-device-mapping.rst
@@ -49,7 +49,7 @@ When we talk about block device mapping, we usually refer to one of two things
on.
For more details on this please refer to the :doc:`Driver BDM Data
- Structures <../reference/block-device-structs>` refernce document.
+ Structures <../reference/block-device-structs>` reference document.
.. note::
diff --git a/doc/source/user/certificate-validation.rst b/doc/source/user/certificate-validation.rst
index 1140712159..69219a67ed 100644
--- a/doc/source/user/certificate-validation.rst
+++ b/doc/source/user/certificate-validation.rst
@@ -309,7 +309,7 @@ Create the first intermediate certificate
"""""""""""""""""""""""""""""""""""""""""
Create a certificate request for the first intermediate certificate. For these
instructions, we will save the certificate request as
-``cert_intermeidate_a.csr`` and the private key as ``key_intermediate_a.pem``.
+``cert_intermediate_a.csr`` and the private key as ``key_intermediate_a.pem``.
.. code-block:: console
@@ -357,7 +357,7 @@ Create the second intermediate certificate
""""""""""""""""""""""""""""""""""""""""""
Create a certificate request for the second intermediate certificate. For these
instructions, we will save the certificate request as
-``cert_intermeidate_b.csr`` and the private key as ``key_intermediate_b.pem``.
+``cert_intermediate_b.csr`` and the private key as ``key_intermediate_b.pem``.
.. code-block:: console
diff --git a/doc/source/user/metadata.rst b/doc/source/user/metadata.rst
index f5f39231ac..65f5bddc96 100644
--- a/doc/source/user/metadata.rst
+++ b/doc/source/user/metadata.rst
@@ -234,6 +234,17 @@ information about the format of the files and subdirectories within these
directories.
+Setting in image
+~~~~~~~~~~~~~~~~
+
+The ``img_config_drive`` image metadata property specifies whether the image
+needs a config drive and can be used to force enable the config drive:
+
+.. code-block:: console
+
+ $ openstack image set IMG-UUID --property img_config_drive=mandatory
+
+
Nova metadata
-------------
diff --git a/doc/source/user/support-matrix.ini b/doc/source/user/support-matrix.ini
index 412623b4a3..ae5bbde110 100644
--- a/doc/source/user/support-matrix.ini
+++ b/doc/source/user/support-matrix.ini
@@ -332,6 +332,26 @@ driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
driver.zvm=unknown
+[operation.rebuild-volume-backed]
+title=Rebuild volume backed instance
+status=optional
+notes=This will wipe out all existing data in the root volume
+ of a volume backed instance. This is available from microversion
+ 2.93 and onwards.
+cli=openstack server rebuild --reimage-boot-volume --image <image> <server>
+driver.libvirt-kvm-x86=complete
+driver.libvirt-kvm-aarch64=complete
+driver.libvirt-kvm-ppc64=complete
+driver.libvirt-kvm-s390x=complete
+driver.libvirt-qemu-x86=complete
+driver.libvirt-lxc=unknown
+driver.vmware=missing
+driver.hyperv=missing
+driver.ironic=missing
+driver.libvirt-vz-vm=missing
+driver.libvirt-vz-ct=missing
+driver.zvm=missing
+
[operation.get-guest-info]
title=Guest instance status
status=mandatory
diff --git a/doc/source/user/wsgi.rst b/doc/source/user/wsgi.rst
index 6b314b4832..63f949df1a 100644
--- a/doc/source/user/wsgi.rst
+++ b/doc/source/user/wsgi.rst
@@ -8,10 +8,16 @@ as Apache_ or nginx_).
The nova project provides two automatically generated entry points that
support this: ``nova-api-wsgi`` and ``nova-metadata-wsgi``. These read
-``nova.conf`` and ``api-paste.ini`` and generate the required module-level
-``application`` that most WSGI servers require. If nova is installed using pip,
-these two scripts will be installed into whatever the expected ``bin``
-directory is for the environment.
+``nova.conf`` and ``api-paste.ini`` by default and generate the required
+module-level ``application`` that most WSGI servers require.
+If nova is installed using pip, these two scripts will be installed into
+whatever the expected ``bin`` directory is for the environment.
+
+The config files and config directory can be overridden via the
+``OS_NOVA_CONFIG_FILES`` and ``OS_NOVA_CONFIG_DIR`` environment variables.
+File paths listed in ``OS_NOVA_CONFIG_FILES`` are relative to
+``OS_NOVA_CONFIG_DIR`` and delimited by ``;``.
+
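+For example, the following illustrative settings (the paths and file names are
+examples only) would load ``/etc/nova/nova.conf`` and
+``/etc/nova/nova-api.conf``:
+
+.. code-block:: console
+
+   $ export OS_NOVA_CONFIG_DIR=/etc/nova
+   $ export OS_NOVA_CONFIG_FILES="nova.conf;nova-api.conf"
+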
The new scripts replace older experimental scripts that could be found in the
``nova/wsgi`` directory of the code repository. The new scripts are *not*
diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf
index 742f348f11..8b7fd3bec8 100644
--- a/etc/nova/nova-config-generator.conf
+++ b/etc/nova/nova-config-generator.conf
@@ -16,3 +16,4 @@ namespace = oslo.concurrency
namespace = oslo.reports
namespace = keystonemiddleware.auth_token
namespace = osprofiler
+namespace = os_vif
diff --git a/mypy-files.txt b/mypy-files.txt
index 1b56b5e8ea..391ed58d87 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -1,5 +1,7 @@
nova/compute/manager.py
+nova/compute/pci_placement_translator.py
nova/crypto.py
+nova/filesystem.py
nova/limit/local.py
nova/limit/placement.py
nova/network/neutron.py
@@ -12,6 +14,9 @@ nova/virt/driver.py
nova/virt/hardware.py
nova/virt/libvirt/machine_type_utils.py
nova/virt/libvirt/__init__.py
+nova/virt/libvirt/cpu/__init__.py
+nova/virt/libvirt/cpu/api.py
+nova/virt/libvirt/cpu/core.py
nova/virt/libvirt/driver.py
nova/virt/libvirt/event.py
nova/virt/libvirt/guest.py
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index a3a8b1f41e..718ac7e8e6 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -235,7 +235,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /flavors/{flavor_id}/os-extra_specs`` and
``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs.
* 2.87 - Adds support for rescuing boot from volume instances when the
- compute host reports the COMPUTE_BFV_RESCUE capability trait.
+ compute host reports the COMPUTE_RESCUE_BFV capability trait.
* 2.88 - Drop statistics-style fields from the ``/os-hypervisors/detail``
and ``/os-hypervisors/{hypervisor_id}`` APIs, and remove the
``/os-hypervisors/statistics`` and
@@ -252,6 +252,9 @@ REST_API_VERSION_HISTORY = """REST API Version History:
* 2.92 - Drop generation of keypair, add keypair name validation on
``POST /os-keypairs`` and allow including @ and dot (.) characters
in keypair name.
+ * 2.93 - Add support for volume backed server rebuild.
+ * 2.94 - Allow FQDN in server hostname.
+ * 2.95 - Evacuate will now stop instance at destination.
"""
# The minimum and maximum versions of the API supported
@@ -260,7 +263,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.92'
+_MAX_API_VERSION = '2.95'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/compute/evacuate.py b/nova/api/openstack/compute/evacuate.py
index aa35812759..a6602be079 100644
--- a/nova/api/openstack/compute/evacuate.py
+++ b/nova/api/openstack/compute/evacuate.py
@@ -23,9 +23,11 @@ from nova.api.openstack.compute.schemas import evacuate
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
+from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
+from nova import objects
from nova.policies import evacuate as evac_policies
from nova import utils
@@ -33,6 +35,8 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED = 62
+
class EvacuateController(wsgi.Controller):
def __init__(self):
@@ -77,7 +81,8 @@ class EvacuateController(wsgi.Controller):
@validation.schema(evacuate.evacuate, "2.0", "2.13")
@validation.schema(evacuate.evacuate_v214, "2.14", "2.28")
@validation.schema(evacuate.evacuate_v2_29, "2.29", "2.67")
- @validation.schema(evacuate.evacuate_v2_68, "2.68")
+ @validation.schema(evacuate.evacuate_v2_68, "2.68", "2.94")
+ @validation.schema(evacuate.evacuate_v2_95, "2.95")
def _evacuate(self, req, id, body):
"""Permit admins to evacuate a server from a failed host
to a new one.
@@ -92,6 +97,19 @@ class EvacuateController(wsgi.Controller):
host = evacuate_body.get("host")
force = None
+ target_state = None
+ if api_version_request.is_supported(req, min_version='2.95'):
+ min_ver = objects.service.get_minimum_version_all_cells(
+ context, ['nova-compute'])
+ if min_ver < MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED:
+ raise exception.NotSupportedComputeForEvacuateV295(
+ {'currently': min_ver,
+ 'expected': MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED})
+ # Starting from 2.95 any evacuated instance will be stopped at the
+ # destination. Previously an active or stopped instance would have
+ # kept its state.
+ target_state = vm_states.STOPPED
+
on_shared_storage = self._get_on_shared_storage(req, evacuate_body)
if api_version_request.is_supported(req, min_version='2.29'):
@@ -120,7 +138,8 @@ class EvacuateController(wsgi.Controller):
try:
self.compute_api.evacuate(context, instance, host,
- on_shared_storage, password, force)
+ on_shared_storage, password, force,
+ target_state)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate', id)
@@ -130,6 +149,8 @@ class EvacuateController(wsgi.Controller):
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
+ except exception.UnsupportedRPCVersion as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
if (not api_version_request.is_supported(req, min_version='2.14') and
CONF.api.enable_instance_password):
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index e17e6f0ddc..fc8df15db5 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -93,7 +93,14 @@ class FlavorActionController(wsgi.Controller):
vals = body['removeTenantAccess']
tenant = vals['tenant']
- identity.verify_project_id(context, tenant)
+ # It doesn't really matter if project exists or not: we can delete
+ # it from flavor's access list in both cases.
+ try:
+ identity.verify_project_id(context, tenant)
+ except webob.exc.HTTPBadRequest as identity_exc:
+ msg = "Project ID %s is not a valid project." % tenant
+ if msg not in identity_exc.explanation:
+ raise
# NOTE(gibi): We have to load a flavor from the db here as
# flavor.remove_access() will try to emit a notification and that needs
diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py
index 36015542aa..7d374ef432 100644
--- a/nova/api/openstack/compute/remote_consoles.py
+++ b/nova/api/openstack/compute/remote_consoles.py
@@ -56,6 +56,9 @@ class RemoteConsolesController(wsgi.Controller):
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as e:
+ common.raise_http_conflict_for_instance_invalid_state(
+ e, 'get_vnc_console', id)
except NotImplementedError:
common.raise_feature_not_supported()
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index b65e50c62f..c7a2777d3a 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1141,7 +1141,7 @@ Validation is only used for recognized extra spec namespaces, currently:
-------------------------------------
Adds support for rescuing boot from volume instances when the compute host
-reports the ``COMPUTE_BFV_RESCUE`` capability trait.
+reports the ``COMPUTE_RESCUE_BFV`` capability trait.
.. _microversion 2.88:
@@ -1219,3 +1219,31 @@ Add support to pin a server to an availability zone or unpin a server from any a
The ``POST /os-keypairs`` API now forbids to generate a keypair and allows new
safe characters, specifically '@' and '.' (dot character).
+
+.. _microversion 2.93:
+
+2.93 (Maximum in Zed)
+---------------------
+
+Add support for rebuilding volume-backed servers. The end user provides the
+image with the rebuild command and the root volume is reimaged with that
+image, similar to the result of rebuilding an ephemeral disk.
+
+
+2.94
+----
+
+The ``hostname`` parameter to the ``POST /servers`` (create server), ``PUT
+/servers/{id}`` (update server) and ``POST /servers/{server_id}/action
+(rebuild)`` (rebuild server) APIs is now allowed to be a Fully Qualified Domain
+Name (FQDN).
+
+.. _microversion 2.95:
+
+2.95 (Maximum in 2023.1 Antelope)
+---------------------------------
+
+Any evacuated instance will now be stopped at the destination. This
+requires a minimum nova release of 27.0.0 (OpenStack release 2023.1
+Antelope). Operators can still use the previous microversion to keep
+the older behavior.
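By way of illustration only, and with placeholder identifiers, the request bodies enabled by these microversions might look like the following sketch:

    # Hypothetical request bodies; all identifiers are placeholders.
    # 2.93: rebuilding a volume-backed server reimages its root volume.
    rebuild_2_93 = {'rebuild': {'imageRef': '<image-id>'}}

    # 2.94: the hostname may now be a fully qualified domain name.
    create_2_94 = {
        'server': {
            'name': 'db01',
            'hostname': 'db01.example.com',
            'imageRef': '<image-id>',
            'flavorRef': '<flavor-id>',
            'networks': 'auto',
        },
    }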
diff --git a/nova/api/openstack/compute/schemas/evacuate.py b/nova/api/openstack/compute/schemas/evacuate.py
index a415a97f89..c7b84a655e 100644
--- a/nova/api/openstack/compute/schemas/evacuate.py
+++ b/nova/api/openstack/compute/schemas/evacuate.py
@@ -46,3 +46,7 @@ evacuate_v2_29['properties']['evacuate']['properties'][
# v2.68 removes the 'force' parameter added in v2.29, meaning it is identical
# to v2.14
evacuate_v2_68 = copy.deepcopy(evacuate_v214)
+
+# v2.95 keeps the same schema; evacuating an instance will now leave it
+# stopped at the destination.
+evacuate_v2_95 = copy.deepcopy(evacuate_v2_68)
diff --git a/nova/api/openstack/compute/schemas/server_external_events.py b/nova/api/openstack/compute/schemas/server_external_events.py
index b8a89e047d..6ac3f009ec 100644
--- a/nova/api/openstack/compute/schemas/server_external_events.py
+++ b/nova/api/openstack/compute/schemas/server_external_events.py
@@ -63,3 +63,7 @@ name['enum'].append('power-update')
create_v282 = copy.deepcopy(create_v276)
name = create_v282['properties']['events']['items']['properties']['name']
name['enum'].append('accelerator-request-bound')
+
+create_v293 = copy.deepcopy(create_v282)
+name = create_v293['properties']['events']['items']['properties']['name']
+name['enum'].append('volume-reimaged')
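The new event name is reported back through the external events API once the root volume has been rewritten; a hedged sketch of such a payload, with placeholder identifiers, is:

    # Hypothetical body for POST /os-server-external-events at
    # microversion 2.93 or later; server_uuid and tag are placeholders.
    payload = {
        'events': [{
            'name': 'volume-reimaged',
            'server_uuid': '<server-id>',
            'tag': '<volume-id>',      # the reimaged root volume
            'status': 'completed',
        }],
    }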
diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py
index 300411de40..0869f83434 100644
--- a/nova/api/openstack/compute/schemas/servers.py
+++ b/nova/api/openstack/compute/schemas/servers.py
@@ -360,6 +360,11 @@ create_v290 = copy.deepcopy(create_v274)
create_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+# Support FQDN as hostname
+create_v294 = copy.deepcopy(create_v290)
+create_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
update = {
'type': 'object',
'properties': {
@@ -391,6 +396,11 @@ update_v290 = copy.deepcopy(update_v219)
update_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+
+update_v294 = copy.deepcopy(update_v290)
+update_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
rebuild = {
'type': 'object',
'properties': {
@@ -449,6 +459,10 @@ rebuild_v290 = copy.deepcopy(rebuild_v263)
rebuild_v290['properties']['rebuild']['properties'][
'hostname'] = parameter_types.hostname
+rebuild_v294 = copy.deepcopy(rebuild_v290)
+rebuild_v294['properties']['rebuild']['properties'][
+ 'hostname'] = parameter_types.fqdn
+
resize = {
'type': 'object',
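All of the per-microversion schemas above follow the same copy-and-override pattern; the following toy sketch (not Nova's real parameter types) shows the idea:

    import copy

    # Toy base schema standing in for an earlier microversion.
    create_base = {
        'type': 'object',
        'properties': {
            'server': {
                'type': 'object',
                'properties': {
                    'hostname': {'type': 'string', 'maxLength': 63},
                },
            },
        },
    }

    # A later microversion copies the previous schema and replaces only
    # what changed, here widening 'hostname' to an FQDN-sized string.
    create_next = copy.deepcopy(create_base)
    create_next['properties']['server']['properties']['hostname'] = {
        'type': 'string', 'maxLength': 255,
    }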
diff --git a/nova/api/openstack/compute/server_external_events.py b/nova/api/openstack/compute/server_external_events.py
index 55f17e3541..23813d5790 100644
--- a/nova/api/openstack/compute/server_external_events.py
+++ b/nova/api/openstack/compute/server_external_events.py
@@ -69,7 +69,8 @@ class ServerExternalEventsController(wsgi.Controller):
@validation.schema(server_external_events.create, '2.0', '2.50')
@validation.schema(server_external_events.create_v251, '2.51', '2.75')
@validation.schema(server_external_events.create_v276, '2.76', '2.81')
- @validation.schema(server_external_events.create_v282, '2.82')
+ @validation.schema(server_external_events.create_v282, '2.82', '2.92')
+ @validation.schema(server_external_events.create_v293, '2.93')
def create(self, req, body):
"""Creates a new instance event."""
context = req.environ['nova.context']
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 88f5fd4f8e..33e74456fd 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -677,7 +677,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.create_v263, '2.63', '2.66')
@validation.schema(schema_servers.create_v267, '2.67', '2.73')
@validation.schema(schema_servers.create_v274, '2.74', '2.89')
- @validation.schema(schema_servers.create_v290, '2.90')
+ @validation.schema(schema_servers.create_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.create_v294, '2.94')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
@@ -906,7 +907,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.update_v20, '2.0', '2.0')
@validation.schema(schema_servers.update, '2.1', '2.18')
@validation.schema(schema_servers.update_v219, '2.19', '2.89')
- @validation.schema(schema_servers.update_v290, '2.90')
+ @validation.schema(schema_servers.update_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.update_v294, '2.94')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
@@ -1147,7 +1149,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.rebuild_v254, '2.54', '2.56')
@validation.schema(schema_servers.rebuild_v257, '2.57', '2.62')
@validation.schema(schema_servers.rebuild_v263, '2.63', '2.89')
- @validation.schema(schema_servers.rebuild_v290, '2.90')
+ @validation.schema(schema_servers.rebuild_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.rebuild_v294, '2.94')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
@@ -1205,6 +1208,9 @@ class ServersController(wsgi.Controller):
):
kwargs['hostname'] = rebuild_dict['hostname']
+ if api_version_request.is_supported(req, min_version='2.93'):
+ kwargs['reimage_boot_volume'] = True
+
for request_attribute, instance_attribute in attr_map.items():
try:
if request_attribute == 'name':
@@ -1350,6 +1356,8 @@ class ServersController(wsgi.Controller):
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
+ except exception.InstanceQuiesceFailed as err:
+ raise exc.HTTPConflict(explanation=err.format_message())
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
except exception.OverQuota as e:
diff --git a/nova/api/openstack/compute/services.py b/nova/api/openstack/compute/services.py
index 6deb84a7f1..e9d51d4d0c 100644
--- a/nova/api/openstack/compute/services.py
+++ b/nova/api/openstack/compute/services.py
@@ -48,13 +48,10 @@ class ServiceController(wsgi.Controller):
self.actions = {"enable": self._enable,
"disable": self._disable,
"disable-log-reason": self._disable_log_reason}
- self._placementclient = None # Lazy-load on first access.
@property
def placementclient(self):
- if self._placementclient is None:
- self._placementclient = report.SchedulerReportClient()
- return self._placementclient
+ return report.report_client_singleton()
def _get_services(self, req):
# The API services are filtered out since they are not RPC services
@@ -328,7 +325,7 @@ class ServiceController(wsgi.Controller):
"Failed to delete compute node resource provider "
"for compute node %s: %s",
compute_node.uuid, str(e))
- # remove the host_mapping of this host.
+ # Remove the host_mapping of this host.
try:
hm = objects.HostMapping.get_by_host(context, service.host)
hm.destroy()
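The lazily cached per-controller client is replaced here by a process-wide accessor; one plausible shape for such a helper, shown only as an assumption since its implementation lives outside this hunk, is:

    # Assumed shape of report.report_client_singleton(); the real helper
    # in Nova may differ in detail.
    class SchedulerReportClient:        # stand-in for the real client
        pass

    _CLIENT = None

    def report_client_singleton():
        global _CLIENT
        if _CLIENT is None:
            _CLIENT = SchedulerReportClient()
        return _CLIENT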
diff --git a/nova/api/openstack/identity.py b/nova/api/openstack/identity.py
index 7ffc623fed..15ec884aea 100644
--- a/nova/api/openstack/identity.py
+++ b/nova/api/openstack/identity.py
@@ -27,24 +27,27 @@ def verify_project_id(context, project_id):
"""verify that a project_id exists.
This attempts to verify that a project id exists. If it does not,
- an HTTPBadRequest is emitted.
+ an HTTPBadRequest is emitted. An HTTPBadRequest is also emitted
+ if the Keystone identity service version 3.0 endpoint is not found.
"""
adap = utils.get_ksa_adapter(
'identity', ksa_auth=context.get_auth_plugin(),
min_version=(3, 0), max_version=(3, 'latest'))
- failure = webob.exc.HTTPBadRequest(
- explanation=_("Project ID %s is not a valid project.") %
- project_id)
try:
resp = adap.get('/projects/%s' % project_id)
except kse.EndpointNotFound:
LOG.error(
- "Keystone identity service version 3.0 was not found. This might "
- "be because your endpoint points to the v2.0 versioned endpoint "
- "which is not supported. Please fix this.")
- raise failure
+ "Keystone identity service version 3.0 was not found. This "
+ "might be caused by Nova misconfiguration or Keystone "
+ "problems.")
+ msg = _("Nova was unable to find Keystone service endpoint.")
+ # TODO(astupnik). It may be reasonable to switch to HTTP 503
+ # (HTTP Service Unavailable) instead of HTTP Bad Request here.
+ # If the proper Keystone service is inaccessible, then technically
+ # this is a server-side error and not an error in Nova.
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException:
# something is wrong, like there isn't a keystone v3 endpoint,
# or nova isn't configured for the interface to talk to it;
@@ -57,7 +60,8 @@ def verify_project_id(context, project_id):
return True
elif resp.status_code == 404:
# we got access, and we know this project is not there
- raise failure
+ msg = _("Project ID %s is not a valid project.") % project_id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
elif resp.status_code == 403:
# we don't have enough permission to verify this, so default
# to "it's ok".
diff --git a/nova/api/openstack/wsgi_app.py b/nova/api/openstack/wsgi_app.py
index d60069ce84..6a2b72a611 100644
--- a/nova/api/openstack/wsgi_app.py
+++ b/nova/api/openstack/wsgi_app.py
@@ -42,8 +42,11 @@ def _get_config_files(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
+ files = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
+ if files == ['']:
+ files = CONFIG_FILES
return [os.path.join(dirname, config_file)
- for config_file in CONFIG_FILES]
+ for config_file in files]
def _setup_service(host, name):
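A self-contained sketch of the new lookup behaviour, assuming CONFIG_FILES defaults to the usual api-paste.ini and nova.conf pair:

    import os

    CONFIG_FILES = ['api-paste.ini', 'nova.conf']      # assumed default list

    def get_config_files(env=None):
        env = os.environ if env is None else env
        dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
        files = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
        if files == ['']:                              # unset or empty
            files = CONFIG_FILES
        return [os.path.join(dirname, f) for f in files]

    # OS_NOVA_CONFIG_FILES='nova.conf;nova-api.conf' would yield
    # ['/etc/nova/nova.conf', '/etc/nova/nova-api.conf']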
diff --git a/nova/api/validation/extra_specs/hw.py b/nova/api/validation/extra_specs/hw.py
index 4aaccf639a..c0c8f02809 100644
--- a/nova/api/validation/extra_specs/hw.py
+++ b/nova/api/validation/extra_specs/hw.py
@@ -15,6 +15,7 @@
"""Validators for ``hw`` namespaced extra specs."""
from nova.api.validation.extra_specs import base
+from nova.objects import fields
realtime_validators = [
@@ -162,6 +163,18 @@ hugepage_validators = [
'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)',
},
),
+ base.ExtraSpecValidator(
+ name='hw:locked_memory',
+ description=(
+ 'Determine if **guest** (instance) memory should be locked, '
+ 'preventing swapping. This is required in rare cases for device '
+ 'DMA transfers. Only supported by the libvirt virt driver.'
+ ),
+ value={
+ 'type': bool,
+ 'description': 'Whether to lock **guest** (instance) memory.',
+ },
+ ),
]
numa_validators = [
@@ -498,6 +511,47 @@ feature_flag_validators = [
],
},
),
+ base.ExtraSpecValidator(
+ name='hw:viommu_model',
+ description=(
+ 'This can be used to set the model for the virtual IOMMU device.'
+ ),
+ value={
+ 'type': str,
+ 'enum': [
+ 'intel',
+ 'smmuv3',
+ 'virtio',
+ 'auto'
+ ],
+ 'description': 'model for vIOMMU',
+ },
+ ),
+]
+
+ephemeral_encryption_validators = [
+ base.ExtraSpecValidator(
+ name='hw:ephemeral_encryption',
+ description=(
+ 'Whether to enable ephemeral storage encryption.'
+ ),
+ value={
+ 'type': bool,
+ 'description': 'Whether to enable ephemeral storage encryption.',
+ },
+ ),
+ base.ExtraSpecValidator(
+ name='hw:ephemeral_encryption_format',
+ description=(
+ 'The encryption format to be used if ephemeral storage '
+ 'encryption is enabled via hw:ephemeral_encryption.'
+ ),
+ value={
+ 'type': str,
+ 'description': 'The encryption format to be used if enabled.',
+ 'enum': fields.BlockDeviceEncryptionFormatType.ALL,
+ },
+ ),
]
@@ -509,5 +563,6 @@ def register():
hugepage_validators +
numa_validators +
cpu_topology_validators +
- feature_flag_validators
+ feature_flag_validators +
+ ephemeral_encryption_validators
)
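For example, a flavor exercising the validators added above might carry extra specs along these lines; the encryption format value is an assumption about what fields.BlockDeviceEncryptionFormatType permits:

    # Hypothetical flavor extra specs matching the new validators.
    extra_specs = {
        'hw:locked_memory': 'true',                # lock guest memory
        'hw:viommu_model': 'auto',                 # virtual IOMMU model
        'hw:ephemeral_encryption': 'true',
        'hw:ephemeral_encryption_format': 'luks',  # assumed allowed value
    }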
diff --git a/nova/block_device.py b/nova/block_device.py
index c0c69d4a71..31d163f811 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -52,7 +52,9 @@ bdm_new_fields = set(['source_type', 'destination_type',
'guest_format', 'device_type', 'disk_bus', 'boot_index',
'device_name', 'delete_on_termination', 'snapshot_id',
'volume_id', 'volume_size', 'image_id', 'no_device',
- 'connection_info', 'tag', 'volume_type'])
+ 'connection_info', 'tag', 'volume_type', 'encrypted',
+ 'encryption_secret_uuid', 'encryption_format',
+ 'encryption_options'])
bdm_db_only_fields = set(['id', 'instance_uuid', 'attachment_id', 'uuid'])
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index 5ee30ccff5..45ae678ab4 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -122,6 +122,10 @@ def format_dict(dct, dict_property="Property", dict_value='Value',
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
+ # starting in PrettyTable 3.4.0 we need to also set the header
+ # as align now only applies to the data.
+ if hasattr(pt, 'header_align'):
+ pt.header_align = 'l'
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
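The alignment guard above can be exercised on its own; a minimal sketch, independent of Nova:

    import prettytable

    pt = prettytable.PrettyTable(['Property', 'Value'])
    pt.align = 'l'
    # Newer PrettyTable releases align only the data rows via `align`,
    # so the header needs its own setting where the attribute exists.
    if hasattr(pt, 'header_align'):
        pt.header_align = 'l'
    pt.add_row(['vcpus', 4])
    print(pt)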
@@ -752,15 +756,7 @@ class CellV2Commands(object):
# worry about parsing and splitting a URL which could have special
# characters in the password, which makes parsing a nightmare.
url = sqla_url.make_url(connection)
-
- # TODO(gibi): remove hasattr() conditional in favor of "url.set()"
- # when SQLAlchemy 1.4 is the minimum version in requirements
- if hasattr(url, "set"):
- url = url.set(database=url.database + '_cell0')
- else:
- # TODO(zzzeek): remove when SQLAlchemy 1.4
- # is the minimum version in requirements
- url.database = url.database + '_cell0'
+ url = url.set(database=url.database + '_cell0')
return urlparse.unquote(str(url))
@@ -2217,7 +2213,7 @@ class PlacementCommands(object):
output(_('No cells to process.'))
return 4
- placement = report.SchedulerReportClient()
+ placement = report.report_client_singleton()
neutron = None
if heal_port_allocations:
@@ -2718,7 +2714,7 @@ class PlacementCommands(object):
if verbose:
output = lambda msg: print(msg)
- placement = report.SchedulerReportClient()
+ placement = report.report_client_singleton()
# Resets two in-memory dicts for knowing instances per compute node
self.cn_uuid_mapping = collections.defaultdict(tuple)
self.instances_mapping = collections.defaultdict(list)
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 048ca6d1d3..29e4a5d01e 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -41,7 +41,6 @@ from nova.objects import cell_mapping as cell_mapping_obj
# to be registered under nova.objects when called via _check_machine_type_set
from nova.objects import image_meta as image_meta_obj # noqa: F401
from nova.objects import instance as instance_obj # noqa: F401
-from nova import policy
from nova import utils
from nova import version
from nova.virt.libvirt import machine_type_utils
@@ -249,70 +248,6 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
str(ex))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
- def _check_policy(self):
- """Checks to see if policy file is overwritten with the new
- defaults.
- """
- msg = _("Your policy file contains rules which examine token scope, "
- "which may be due to generation with the new defaults. "
- "If that is done intentionally to migrate to the new rule "
- "format, then you are required to enable the flag "
- "'oslo_policy.enforce_scope=True' and educate end users on "
- "how to request scoped tokens from Keystone. Another easy "
- "and recommended way for you to achieve the same is via two "
- "flags, 'oslo_policy.enforce_scope=True' and "
- "'oslo_policy.enforce_new_defaults=True' and avoid "
- "overwriting the file. Please refer to this document to "
- "know the complete migration steps: "
- "https://docs.openstack.org/nova/latest/configuration"
- "/policy-concepts.html. If you did not intend to migrate "
- "to new defaults in this upgrade, then with your current "
- "policy file the scope checking rule will fail. A possible "
- "reason for such a policy file is that you generated it with "
- "'oslopolicy-sample-generator' in json format. "
- "Three ways to fix this until you are ready to migrate to "
- "scoped policies: 1. Generate the policy file with "
- "'oslopolicy-sample-generator' in yaml format, keep "
- "the generated content commented out, and update "
- "the generated policy.yaml location in "
- "``oslo_policy.policy_file``. "
- "2. Use a pre-existing sample config file from the Train "
- "release. 3. Use an empty or non-existent file to take all "
- "the defaults.")
- rule = "context_is_admin"
- rule_new_default = "role:admin and system_scope:all"
- status = upgradecheck.Result(upgradecheck.Code.SUCCESS)
- # NOTE(gmann): Initialise the policy if it not initialized.
- # We need policy enforcer with all the rules loaded to check
- # their value with defaults.
- try:
- if policy._ENFORCER is None:
- policy.init(suppress_deprecation_warnings=True)
-
- # For safer side, recheck that the enforcer is available before
- # upgrade checks. If something is wrong on oslo side and enforcer
- # is still not available the return warning to avoid any false
- # result.
- if policy._ENFORCER is not None:
- current_rule = str(policy._ENFORCER.rules[rule]).strip("()")
- if (current_rule == rule_new_default and
- not CONF.oslo_policy.enforce_scope):
- status = upgradecheck.Result(upgradecheck.Code.WARNING,
- msg)
- else:
- status = upgradecheck.Result(
- upgradecheck.Code.WARNING,
- _('Policy is not initialized to check the policy rules'))
- except Exception as ex:
- status = upgradecheck.Result(
- upgradecheck.Code.WARNING,
- _('Unable to perform policy checks due to error: %s') %
- str(ex))
- # reset the policy state so that it can be initialized from fresh if
- # operator changes policy file after running this upgrade checks.
- policy.reset()
- return status
-
def _check_old_computes(self):
# warn if there are computes in the system older than the previous
# major release
@@ -350,8 +285,6 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Placement API'), _check_placement),
# Added in Train
(_('Cinder API'), _check_cinder),
- # Added in Ussuri
- (_('Policy Scope-based Defaults'), _check_policy),
# Added in Victoria
(
_('Policy File JSON to YAML Migration'),
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 112a318f1d..6b2023c19f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -118,6 +118,9 @@ MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST = 59
MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ = 60
SUPPORT_VNIC_TYPE_REMOTE_MANAGED = 61
+MIN_COMPUTE_VDPA_ATTACH_DETACH = 62
+MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION = 63
+
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
@@ -277,7 +280,7 @@ def reject_vtpm_instances(operation):
return outer
-def reject_vdpa_instances(operation):
+def reject_vdpa_instances(operation, until=None):
"""Reject requests to decorated function if instance has vDPA interfaces.
Raise OperationNotSupportedForVDPAInterfaces if operations involves one or
@@ -291,8 +294,18 @@ def reject_vdpa_instances(operation):
vif['vnic_type'] == network_model.VNIC_TYPE_VDPA
for vif in instance.get_network_info()
):
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid, operation=operation)
+ reject = True
+ if until is not None:
+ min_ver = objects.service.get_minimum_version_all_cells(
+ nova_context.get_admin_context(), ['nova-compute']
+ )
+ if min_ver >= until:
+ reject = False
+
+ if reject:
+ raise exception.OperationNotSupportedForVDPAInterface(
+ instance_uuid=instance.uuid, operation=operation
+ )
return f(self, context, instance, *args, **kw)
return inner
return outer
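The new ``until`` parameter turns the hard rejection into a service-version gate; a generic, Nova-independent sketch of that pattern (the vDPA-interface check itself is elided):

    import functools

    def reject_unless_min_version(operation, until=None, get_min_version=None):
        """Toy gate: allow `operation` only once every service reports at
        least version `until`; with no `until`, always reject."""
        def outer(func):
            @functools.wraps(func)
            def inner(*args, **kwargs):
                supported = (
                    until is not None and
                    get_min_version is not None and
                    get_min_version() >= until
                )
                if not supported:
                    raise RuntimeError('%s is not supported yet' % operation)
                return func(*args, **kwargs)
            return inner
        return outer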
@@ -386,7 +399,6 @@ class API:
self.image_api = image_api or glance.API()
self.network_api = network_api or neutron.API()
self.volume_api = volume_api or cinder.API()
- self._placementclient = None # Lazy-load on first access.
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.compute_task_api = conductor.ComputeTaskAPI()
self.servicegroup_api = servicegroup.API()
@@ -1581,6 +1593,42 @@ class API:
return objects.InstanceGroup.get_by_uuid(context, group_hint)
+ def _update_ephemeral_encryption_bdms(
+ self,
+ flavor: 'objects.Flavor',
+ image_meta_dict: ty.Dict[str, ty.Any],
+ block_device_mapping: 'objects.BlockDeviceMappingList',
+ ) -> None:
+ """Update local BlockDeviceMappings when ephemeral encryption requested
+
+ Enable ephemeral encryption in all local BlockDeviceMappings
+ when requested in the flavor or image, and optionally set the
+ format and options when they are also provided.
+
+ :param flavor: The instance flavor for the request
+ :param image_meta_dict: The image metadata for the request
+ :param block_device_mapping: The current block_device_mapping for the request
+ """
+ image_meta = _get_image_meta_obj(image_meta_dict)
+ if not hardware.get_ephemeral_encryption_constraint(
+ flavor, image_meta):
+ return
+
+ # NOTE(lyarwood): Attempt to find the format in the flavor and image;
+ # if one isn't found then the compute will need to provide and save a
+ # default format during the initial build.
+ eph_format = hardware.get_ephemeral_encryption_format(
+ flavor, image_meta)
+
+ # NOTE(lyarwood): The term ephemeral is overloaded in the codebase,
+ # what it actually means in the context of ephemeral encryption is
+ # anything local to the compute host so use the is_local property.
+ # TODO(lyarwood): Add .get_local_devices() to BlockDeviceMappingList
+ for bdm in [b for b in block_device_mapping if b.is_local]:
+ bdm.encrypted = True
+ if eph_format:
+ bdm.encryption_format = eph_format
+
def _create_instance(self, context, flavor,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -1658,10 +1706,17 @@ class API:
'max_net_count': max_net_count})
max_count = max_net_count
+ # _check_and_transform_bdm transforms block_device_mapping from API
+ # bdms (dicts) to a BlockDeviceMappingList.
block_device_mapping = self._check_and_transform_bdm(context,
base_options, flavor, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
+ # Update any local BlockDeviceMapping objects if ephemeral encryption
+ # has been requested through flavor extra specs or image properties
+ self._update_ephemeral_encryption_bdms(
+ flavor, boot_meta, block_device_mapping)
+
# We can't do this check earlier because we need bdms from all sources
# to have been merged in order to get the root bdm.
# Set validate_numa=False since numa validation is already done by
@@ -2492,6 +2547,8 @@ class API:
instance=instance)
with nova_context.target_cell(context, cell) as cctxt:
self._local_delete(cctxt, instance, bdms, delete_type, cb)
+ self._record_action_start(context, instance,
+ instance_actions.DELETE)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
@@ -2575,9 +2632,7 @@ class API:
@property
def placementclient(self):
- if self._placementclient is None:
- self._placementclient = report.SchedulerReportClient()
- return self._placementclient
+ return report.report_client_singleton()
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
@@ -3536,7 +3591,7 @@ class API:
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
- files_to_inject=None, **kwargs):
+ files_to_inject=None, reimage_boot_volume=False, **kwargs):
"""Rebuild the given instance with the provided attributes."""
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
@@ -3617,15 +3672,16 @@ class API:
orig_image_ref = volume_image_metadata.get('image_id')
if orig_image_ref != image_href:
- # Leave a breadcrumb.
- LOG.debug('Requested to rebuild instance with a new image %s '
- 'for a volume-backed server with image %s in its '
- 'root volume which is not supported.', image_href,
- orig_image_ref, instance=instance)
- msg = _('Unable to rebuild with a different image for a '
- 'volume-backed server.')
- raise exception.ImageUnacceptable(
- image_id=image_href, reason=msg)
+ if not reimage_boot_volume:
+ # Leave a breadcrumb.
+ LOG.debug('Requested to rebuild instance with a new image '
+ '%s for a volume-backed server with image %s in '
+ 'its root volume which is not supported.',
+ image_href, orig_image_ref, instance=instance)
+ msg = _('Unable to rebuild with a different image for a '
+ 'volume-backed server.')
+ raise exception.ImageUnacceptable(
+ image_id=image_href, reason=msg)
else:
orig_image_ref = instance.image_ref
@@ -3740,7 +3796,9 @@ class API:
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
- request_spec=request_spec)
+ request_spec=request_spec,
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=None)
def _check_volume_status(self, context, bdms):
"""Check whether the status of the volume is "in-use".
@@ -4100,9 +4158,6 @@ class API:
# finally split resize and cold migration into separate code paths
@block_extended_resource_request
@block_port_accelerators()
- # FIXME(sean-k-mooney): Cold migrate and resize to different hosts
- # probably works but they have not been tested so block them for now
- @reject_vdpa_instances(instance_actions.RESIZE)
@block_accelerators()
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
@@ -4227,6 +4282,19 @@ class API:
if not same_flavor:
request_spec.numa_topology = hardware.numa_get_constraints(
new_flavor, instance.image_meta)
+ # if the flavor is changed then we need to recalculate the
+ # pci_requests as well because the new flavor might request
+ # different pci_aliases
+ new_pci_requests = pci_request.get_pci_requests_from_flavor(
+ new_flavor)
+ new_pci_requests.instance_uuid = instance.uuid
+ # The neutron based InstancePCIRequest cannot change during resize,
+ # so we just need to copy them from the old request
+ for request in request_spec.pci_requests.requests or []:
+ if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ new_pci_requests.requests.append(request)
+ request_spec.pci_requests = new_pci_requests
+
# TODO(huaqiang): Remove in Wallaby
# check nova-compute nodes have been updated to Victoria to resize
# instance to a new mixed instance from a dedicated or shared
@@ -4328,10 +4396,7 @@ class API:
allow_same_host = CONF.allow_resize_to_same_host
return allow_same_host
- # FIXME(sean-k-mooney): Shelve works but unshelve does not due to bug
- # #1851545, so block it for now
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.SHELVE)
@reject_vtpm_instances(instance_actions.SHELVE)
@block_accelerators(until_service=54)
@check_instance_lock
@@ -4599,11 +4664,10 @@ class API:
return self.compute_rpcapi.get_instance_diagnostics(context,
instance=instance)
- # FIXME(sean-k-mooney): Suspend does not work because we do not unplug
- # the vDPA devices before calling managed save as we do with SR-IOV
- # devices
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.SUSPEND)
+ @reject_vdpa_instances(
+ instance_actions.SUSPEND, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION
+ )
@block_accelerators()
@reject_sev_instances(instance_actions.SUSPEND)
@check_instance_lock
@@ -4616,6 +4680,9 @@ class API:
self.compute_rpcapi.suspend_instance(context, instance)
@check_instance_lock
+ @reject_vdpa_instances(
+ instance_actions.RESUME, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION
+ )
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
@@ -4633,6 +4700,7 @@ class API:
allow_bfv_rescue=False):
"""Rescue the given instance."""
+ image_meta = None
if rescue_image_ref:
try:
image_meta = image_meta_obj.ImageMeta.from_image_ref(
@@ -4653,6 +4721,8 @@ class API:
"image properties set")
raise exception.UnsupportedRescueImage(
image=rescue_image_ref)
+ else:
+ image_meta = instance.image_meta
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4661,6 +4731,9 @@ class API:
volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
+ allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \
+ 'hw_rescue_device' in image_meta.properties
+
if volume_backed and allow_bfv_rescue:
cn = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
@@ -5304,9 +5377,14 @@ class API:
instance_uuid=instance.uuid)
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
- vm_states.STOPPED],
- task_state=[None])
+ @reject_vdpa_instances(
+ instance_actions.ATTACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH
+ )
+ @check_instance_state(
+ vm_state=[
+ vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED
+ ], task_state=[None]
+ )
def attach_interface(self, context, instance, network_id, port_id,
requested_ip, tag=None):
"""Use hotplug to add an network adapter to an instance."""
@@ -5319,12 +5397,6 @@ class API:
# port.resource_request field which only returned for admins
port = self.network_api.show_port(
context.elevated(), port_id)['port']
- if port.get('binding:vnic_type', "normal") == "vdpa":
- # FIXME(sean-k-mooney): Attach works but detach results in a
- # QEMU error; blocked until this is resolved
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid,
- operation=instance_actions.ATTACH_INTERFACE)
if port.get('binding:vnic_type', 'normal') in (
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
@@ -5343,37 +5415,23 @@ class API:
requested_ip=requested_ip, tag=tag)
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
- vm_states.STOPPED],
- task_state=[None])
+ @reject_vdpa_instances(
+ instance_actions.DETACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH
+ )
+ @check_instance_state(
+ vm_state=[
+ vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED
+ ], task_state=[None]
+ )
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
-
- # FIXME(sean-k-mooney): Detach currently results in a failure to remove
- # the interface from the live libvirt domain, so while the networking
- # is torn down on the host the vDPA device is still attached to the VM.
- # This is likely a libvirt/qemu bug so block detach until that is
- # resolved.
for vif in instance.get_network_info():
if vif['id'] == port_id:
- if vif['vnic_type'] == 'vdpa':
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid,
- operation=instance_actions.DETACH_INTERFACE)
if vif['vnic_type'] in (
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
raise exception.ForbiddenPortsWithAccelerator()
break
- else:
- # NOTE(sean-k-mooney) This should never happen but just in case the
- # info cache does not have the port we are detaching we can fall
- # back to neutron.
- port = self.network_api.show_port(context, port_id)['port']
- if port.get('binding:vnic_type', 'normal') == 'vdpa':
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid,
- operation=instance_actions.DETACH_INTERFACE)
self._record_action_start(
context, instance, instance_actions.DETACH_INTERFACE)
@@ -5418,7 +5476,10 @@ class API:
@block_extended_resource_request
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.LIVE_MIGRATION)
+ @reject_vdpa_instances(
+ instance_actions.LIVE_MIGRATION,
+ until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION
+ )
@block_accelerators()
@reject_vtpm_instances(instance_actions.LIVE_MIGRATION)
@reject_sev_instances(instance_actions.LIVE_MIGRATION)
@@ -5552,14 +5613,12 @@ class API:
@block_extended_resource_request
@block_port_accelerators()
- # FIXME(sean-k-mooney): rebuild works but we have not tested evacuate yet
- @reject_vdpa_instances(instance_actions.EVACUATE)
@reject_vtpm_instances(instance_actions.EVACUATE)
@block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.ERROR])
+ vm_states.ERROR], task_state=None)
def evacuate(self, context, instance, host, on_shared_storage,
- admin_password=None, force=None):
+ admin_password=None, force=None, target_state=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
@@ -5570,6 +5629,7 @@ class API:
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
:param force: Force the evacuation to the specific host target
+ :param target_state: Set a target state for the evacuated instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
@@ -5584,7 +5644,7 @@ class API:
context, instance.uuid)
instance.task_state = task_states.REBUILDING
- instance.save(expected_task_state=[None])
+ instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.EVACUATE)
# NOTE(danms): Create this as a tombstone for the source compute
@@ -5633,7 +5693,7 @@ class API:
on_shared_storage=on_shared_storage,
host=host,
request_spec=request_spec,
- )
+ target_state=target_state)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
@@ -6392,13 +6452,10 @@ class AggregateAPI:
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.query_client = query.SchedulerQueryClient()
- self._placement_client = None # Lazy-load on first access.
@property
def placement_client(self):
- if self._placement_client is None:
- self._placement_client = report.SchedulerReportClient()
- return self._placement_client
+ return report.report_client_singleton()
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 79e8f2f012..490b418081 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -124,7 +124,13 @@ class Claim(NopClaim):
pci_requests = self._pci_requests
if pci_requests.requests:
stats = self.tracker.pci_tracker.stats
- if not stats.support_requests(pci_requests.requests):
+ if not stats.support_requests(
+ pci_requests.requests,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+ # carries its own placement provider mapping information
+ provider_mapping=None,
+ ):
return _('Claim pci failed')
def _test_numa_topology(self, compute_node, limit):
@@ -139,12 +145,17 @@ class Claim(NopClaim):
if pci_requests.requests:
pci_stats = self.tracker.pci_tracker.stats
- instance_topology = (
- hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limit,
- pci_requests=pci_requests.requests,
- pci_stats=pci_stats))
+ instance_topology = hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limit,
+ pci_requests=pci_requests.requests,
+ pci_stats=pci_stats,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+ # carries its own placement provider mapping information
+ provider_mapping=None,
+ )
if requested_topology and not instance_topology:
if pci_requests.requests:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index de52973b0b..efcdece81a 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -31,6 +31,7 @@ import contextlib
import copy
import functools
import inspect
+import math
import sys
import time
import traceback
@@ -83,6 +84,7 @@ from nova.objects import external_event as external_event_obj
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_req_module
from nova.pci import whitelist
from nova import safe_utils
@@ -95,6 +97,7 @@ from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
+import nova.virt.node
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
@@ -615,13 +618,18 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='6.0')
+ target = messaging.Target(version='6.2')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# We want the ComputeManager, ResourceTracker and ComputeVirtAPI all
# using the same instance of SchedulerReportClient which has the
# ProviderTree cache for this compute service.
+ # NOTE(danms): We do not use the global placement client
+ # singleton here, because the above-mentioned stack of objects
+ # maintain local state in the client. Thus, keeping our own
+ # private object for that stack avoids any potential conflict
+ # with other users in our process outside of the above.
self.reportclient = report.SchedulerReportClient()
self.virtapi = ComputeVirtAPI(self)
self.network_api = neutron.API()
@@ -1241,6 +1249,20 @@ class ComputeManager(manager.Manager):
'updated.', instance=instance)
self._set_instance_obj_error_state(instance)
return
+ except exception.PciDeviceNotFoundById:
+ # This is bug 1981813 where the bound port vnic_type has changed
+ # from direct to macvtap. Nova does not support that and it
+ # already printed an ERROR when the change is detected during
+ # _heal_instance_info_cache. Now we print an ERROR again and skip
+ # plugging the vifs but let the service startup continue to init
+ # the other instances
+ LOG.exception(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+ 'support such change.',
+ instance=instance
+ )
+ return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
@@ -1450,40 +1472,120 @@ class ComputeManager(manager.Manager):
:return: a dict of ComputeNode objects keyed by the UUID of the given
node.
"""
- nodes_by_uuid = {}
try:
- node_names = self.driver.get_available_nodes()
+ node_ids = self.driver.get_nodenames_by_uuid()
except exception.VirtDriverNotReady:
LOG.warning(
"Virt driver is not ready. If this is the first time this "
- "service is starting on this host, then you can ignore this "
- "warning.")
+ "service is starting on this host, then you can ignore "
+ "this warning.")
return {}
- for node_name in node_names:
- try:
- node = objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, node_name)
- nodes_by_uuid[node.uuid] = node
- except exception.ComputeHostNotFound:
- LOG.warning(
- "Compute node %s not found in the database. If this is "
- "the first time this service is starting on this host, "
- "then you can ignore this warning.", node_name)
- return nodes_by_uuid
+ nodes = objects.ComputeNodeList.get_all_by_uuids(context,
+ list(node_ids.keys()))
+ if not nodes:
+ # NOTE(danms): This should only happen if the compute_id is
+ # pre-provisioned on a host that has never started.
+ LOG.warning('Compute nodes %s for host %s were not found in the '
+ 'database. If this is the first time this service is '
+ 'starting on this host, then you can ignore this '
+ 'warning.',
+ list(node_ids.keys()), self.host)
+ return {}
+
+ for node in nodes:
+ if node.hypervisor_hostname != node_ids.get(node.uuid):
+ raise exception.InvalidConfiguration(
+ ('My compute node %s has hypervisor_hostname %s '
+ 'but virt driver reports it should be %s. Possible '
+ 'rename detected, refusing to start!') % (
+ node.uuid, node.hypervisor_hostname,
+ node_ids.get(node.uuid)))
+
+ return {n.uuid: n for n in nodes}
+
+ def _ensure_existing_node_identity(self, service_ref):
+ """If we are upgrading from an older service version, we need
+ to write our node identity uuid (if not already done) based on
+ nodes assigned to us in the database.
+ """
+ if 'ironic' in CONF.compute_driver.lower():
+ # We do not persist a single local node identity for
+ # ironic
+ return
+
+ if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
+ # Already new enough, nothing to do here, but make sure that we
+ # have a UUID file already, as this is not our first time starting.
+ if nova.virt.node.read_local_node_uuid() is None:
+ raise exception.InvalidConfiguration(
+ ('No local node identity found, but this is not our '
+ 'first startup on this host. Refusing to start after '
+ 'potentially having lost that state!'))
+ return
- def init_host(self):
+ if nova.virt.node.read_local_node_uuid():
+ # We already have a local node identity, no migration needed
+ return
+
+ context = nova.context.get_admin_context()
+ db_nodes = objects.ComputeNodeList.get_all_by_host(context, self.host)
+ if not db_nodes:
+ # This means we have no nodes in the database (that we
+ # know of) and thus have no need to record an existing
+ # UUID. That is strange for an upgrade, so refuse to start.
+ raise exception.InvalidConfiguration(
+ ('Upgrading from service version %i but found no '
+ 'nodes in the database for host %s to persist '
+ 'locally; Possible rename detected, '
+ 'refusing to start!') % (
+ service_ref.version, self.host))
+
+ if len(db_nodes) > 1:
+ # If this happens we can't do the right thing, so raise an
+ # exception to abort host startup
+ LOG.warning('Multiple nodes found in the database for host %s; '
+ 'unable to persist local node identity automatically',
+ self.host)
+ raise exception.InvalidConfiguration(
+ 'Multiple nodes found in database, manual node uuid '
+ 'configuration required')
+
+ nova.virt.node.write_local_node_uuid(db_nodes[0].uuid)
+
+ def _check_for_host_rename(self, nodes_by_uuid):
+ if 'ironic' in CONF.compute_driver.lower():
+ # Ironic (currently) rebalances nodes at various times, and as
+ # such, nodes being discovered as assigned to this host with a
+ # different hostname is not surprising. Skip this check for
+ # ironic.
+ return
+ for node in nodes_by_uuid.values():
+ if node.host != self.host:
+ raise exception.InvalidConfiguration(
+ 'My node %s has host %r but my host is %r; '
+ 'Possible rename detected, refusing to start!' % (
+ node.uuid, node.host, self.host))
+ LOG.debug('Verified node %s matches my host %s',
+ node.uuid, self.host)
+
+ def init_host(self, service_ref):
"""Initialization for a standalone compute service."""
- if CONF.pci.passthrough_whitelist:
- # Simply loading the PCI passthrough whitelist will do a bunch of
+ if service_ref:
+ # If we are an existing service, check to see if we need
+ # to record a locally-persistent node identity because
+ # we have upgraded from a previous version.
+ self._ensure_existing_node_identity(service_ref)
+
+ if CONF.pci.device_spec:
+ # Simply loading the PCI passthrough spec will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
# constructed when updating available resources for the compute
# node(s) in the resource tracker, effectively killing that task.
- # So load up the whitelist when starting the compute service to
- # flush any invalid configuration early so we can kill the service
+ # So load up the spec when starting the compute service to
+ # flush any invalid configuration early, so we can kill the service
# if the configuration is wrong.
- whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ whitelist.Whitelist(CONF.pci.device_spec)
nova.conf.neutron.register_dynamic_opts(CONF)
# Even if only libvirt uses them, make it available for all drivers
@@ -1504,7 +1606,18 @@ class ComputeManager(manager.Manager):
raise exception.InvalidConfiguration(msg)
self.driver.init_host(host=self.host)
+
+ # NOTE(gibi): At this point the compute_nodes of the resource tracker
+ # has not been populated yet so we cannot rely on the resource tracker
+ # here.
context = nova.context.get_admin_context()
+ nodes_by_uuid = self._get_nodes(context)
+
+ # NOTE(danms): Check for a possible host rename and abort
+ # startup before we start mucking with instances we think are
+ # ours.
+ self._check_for_host_rename(nodes_by_uuid)
+
instances = objects.InstanceList.get_by_host(
context, self.host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
@@ -1514,17 +1627,12 @@ class ComputeManager(manager.Manager):
self._validate_pinning_configuration(instances)
self._validate_vtpm_configuration(instances)
- # NOTE(gibi): At this point the compute_nodes of the resource tracker
- # has not been populated yet so we cannot rely on the resource tracker
- # here.
# NOTE(gibi): If ironic and vcenter virt driver slow start time
# becomes problematic here then we should consider adding a config
# option or a driver flag to tell us if we should thread
# _destroy_evacuated_instances and
# _error_out_instances_whose_build_was_interrupted out in the
# background on startup
- nodes_by_uuid = self._get_nodes(context)
-
try:
# checking that instance was not already evacuated to other host
evacuated_instances = self._destroy_evacuated_instances(
@@ -2025,6 +2133,7 @@ class ComputeManager(manager.Manager):
ephemerals = []
swap = []
block_device_mapping = []
+ image = []
for device in block_devices:
if block_device.new_format_is_ephemeral(device):
@@ -2036,8 +2145,12 @@ class ComputeManager(manager.Manager):
if driver_block_device.is_block_device_mapping(device):
block_device_mapping.append(device)
+ if driver_block_device.is_local_image(device):
+ image.append(device)
+
self._default_device_names_for_instance(instance,
root_device_name,
+ image,
ephemerals,
swap,
block_device_mapping)
@@ -2446,10 +2559,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils\
- .update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -2711,7 +2826,8 @@ class ComputeManager(manager.Manager):
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
- exception.UnexpectedDeletingTaskStateError):
+ exception.UnexpectedDeletingTaskStateError,
+ exception.ComputeResourcesUnavailable):
with excutils.save_and_reraise_exception():
self._build_resources_cleanup(instance, network_info)
except (exception.UnexpectedTaskStateError,
@@ -3009,6 +3125,7 @@ class ComputeManager(manager.Manager):
self._try_deallocate_network(context, instance, requested_networks)
timer.restart()
+ connector = None
for bdm in vol_bdms:
try:
if bdm.attachment_id:
@@ -3017,7 +3134,8 @@ class ComputeManager(manager.Manager):
else:
# NOTE(vish): actual driver detach done in driver.destroy,
# so just tell cinder that we are done with it.
- connector = self.driver.get_volume_connector(instance)
+ if connector is None:
+ connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
@@ -3386,18 +3504,124 @@ class ComputeManager(manager.Manager):
migration.status = status
migration.save()
+ @staticmethod
+ def _reimage_failed_callback(event_name, instance):
+ msg = ('Cinder reported failure during reimaging '
+ 'with %(event)s for instance %(uuid)s')
+ msg_args = {'event': event_name, 'uuid': instance.uuid}
+ LOG.error(msg, msg_args)
+ raise exception.ReimageException(msg % msg_args)
+
+ def _detach_root_volume(self, context, instance, root_bdm):
+ volume_id = root_bdm.volume_id
+ mp = root_bdm.device_name
+ old_connection_info = jsonutils.loads(root_bdm.connection_info)
+ try:
+ self.driver.detach_volume(context, old_connection_info,
+ instance, root_bdm.device_name)
+ except exception.DiskNotFound as err:
+ LOG.warning('Ignoring DiskNotFound exception while '
+ 'detaching volume %(volume_id)s from '
+ '%(mp)s : %(err)s',
+ {'volume_id': volume_id, 'mp': mp,
+ 'err': err}, instance=instance)
+ except exception.DeviceDetachFailed:
+ with excutils.save_and_reraise_exception():
+ LOG.warning('Guest refused to detach volume %(vol)s',
+ {'vol': volume_id}, instance=instance)
+ self.volume_api.roll_detaching(context, volume_id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Failed to detach volume '
+ '%(volume_id)s from %(mp)s',
+ {'volume_id': volume_id, 'mp': mp},
+ instance=instance)
+ self.volume_api.roll_detaching(context, volume_id)
+
+ def _rebuild_volume_backed_instance(self, context, instance, bdms,
+ image_id):
+ # Get root bdm and attachment ID associated to it
+ root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
+ old_attachment_id = root_bdm.attachment_id
+
+ # Create a new attachment and delete the previous attachment
+ # We create a new attachment first to keep the volume in
+ # reserved state after old attachment is deleted and avoid any
+ # races in between the attachment create and delete.
+ attachment_id = None
+ try:
+ attachment_id = self.volume_api.attachment_create(
+ context, root_bdm.volume_id, instance.uuid)['id']
+ self._detach_root_volume(context, instance, root_bdm)
+ root_bdm.attachment_id = attachment_id
+ root_bdm.save()
+ self.volume_api.attachment_delete(context,
+ old_attachment_id)
+ except exception.InstanceNotFound:
+ # This means we failed to save the new attachment because
+ # the instance is deleted, so (try to) delete it and abort.
+ try:
+ self.volume_api.attachment_delete(context,
+ attachment_id)
+ except cinder_exception.ClientException:
+ LOG.error('Failed to delete new attachment %s',
+ attachment_id)
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ except cinder_exception.ClientException:
+ if attachment_id:
+ LOG.error('Failed to delete old attachment %s',
+ old_attachment_id)
+ else:
+ LOG.error('Failed to create new attachment')
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ events = [('volume-reimaged', root_bdm.volume_id)]
+
+ # Get the image requested for rebuild
+ try:
+ image = self.image_api.get(context, image_id)
+ except exception.ImageNotFound:
+ msg = _('Image %s not found.') % image_id
+ LOG.error(msg)
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ image_size = int(math.ceil(float(image.get('size')) / units.Gi))
+ deadline = CONF.reimage_timeout_per_gb * image_size
+ error_cb = self._reimage_failed_callback
+
+ # Call cinder to perform reimage operation and wait until an
+ # external event is triggered.
+ try:
+ with self.virtapi.wait_for_instance_event(instance, events,
+ deadline=deadline,
+ error_callback=error_cb):
+ self.volume_api.reimage_volume(
+ context, root_bdm.volume_id, image_id,
+ reimage_reserved=True)
+
+ except Exception as ex:
+ LOG.error('Failed to rebuild volume backed instance: %s',
+ str(ex), instance=instance)
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+
def _rebuild_default_impl(
self, context, instance, image_meta, injected_files,
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None, evacuate=False,
block_device_info=None, preserve_ephemeral=False,
- accel_uuids=None):
+ accel_uuids=None, reimage_boot_volume=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
accel_info = []
+ detach_root_bdm = not reimage_boot_volume
if evacuate:
if instance.flavor.extra_specs.get('accel:device_profile'):
try:
@@ -3409,13 +3633,36 @@ class ComputeManager(manager.Manager):
msg = _('Failure getting accelerator resources.')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
- detach_block_devices(context, bdms)
+ detach_block_devices(context, bdms,
+ detach_root_bdm=detach_root_bdm)
else:
self._power_off_instance(instance, clean_shutdown=True)
- detach_block_devices(context, bdms)
- self.driver.destroy(context, instance,
- network_info=network_info,
- block_device_info=block_device_info)
+ detach_block_devices(context, bdms,
+ detach_root_bdm=detach_root_bdm)
+ if reimage_boot_volume:
+ # Previously, the calls reaching here were for image-backed
+ # instance rebuild and didn't have a root bdm, so now we need to
+ # handle the root bdm case as well. For the root BDM we perform the
+ # attach/detach operations manually because we want to keep the
+ # volume in a 'reserved' state on the cinder side throughout the
+ # reimage process, so we exclude the root BDM from certain
+ # operations here, i.e. deleting its mapping before the destroy
+ # call.
+ block_device_info_copy = copy.deepcopy(block_device_info)
+ root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
+ mapping = block_device_info_copy["block_device_mapping"]
+ # drop root bdm from the mapping
+ mapping = [
+ bdm for bdm in mapping
+ if bdm["volume_id"] != root_bdm.volume_id
+ ]
+ self.driver.destroy(context, instance,
+ network_info=network_info,
+ block_device_info=block_device_info_copy)
+ else:
+ self.driver.destroy(context, instance,
+ network_info=network_info,
+ block_device_info=block_device_info)
try:
accel_info = self._get_accel_info(context, instance)
except Exception as exc:
@@ -3424,6 +3671,12 @@ class ComputeManager(manager.Manager):
msg = _('Failure getting accelerator resources.')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
+ if reimage_boot_volume:
+ is_volume_backed = compute_utils.is_volume_backed_instance(
+ context, instance, bdms)
+ if is_volume_backed:
+ self._rebuild_volume_backed_instance(
+ context, instance, bdms, image_meta.id)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
@@ -3458,7 +3711,8 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
- scheduled_node, limits, request_spec, accel_uuids):
+ scheduled_node, limits, request_spec, accel_uuids,
+ reimage_boot_volume, target_state):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -3490,6 +3744,10 @@ class ComputeManager(manager.Manager):
specified by the user, this will be None
:param request_spec: a RequestSpec object used to schedule the instance
:param accel_uuids: a list of cyborg ARQ uuids
+ :param reimage_boot_volume: Boolean to specify whether the user has
+ explicitly requested to rebuild a boot
+ volume
+ :param target_state: Set a target state for the evacuated instance.
"""
# recreate=True means the instance is being evacuated from a failed
@@ -3554,7 +3812,8 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass, orig_sys_metadata,
bdms, evacuate, on_shared_storage, preserve_ephemeral,
migration, request_spec, allocs, rebuild_claim,
- scheduled_node, limits, accel_uuids)
+ scheduled_node, limits, accel_uuids, reimage_boot_volume,
+ target_state)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
@@ -3613,7 +3872,8 @@ class ComputeManager(manager.Manager):
self, context, instance, orig_image_ref, image_meta,
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
- allocations, rebuild_claim, scheduled_node, limits, accel_uuids):
+ allocations, rebuild_claim, scheduled_node, limits, accel_uuids,
+ reimage_boot_volume, target_state):
"""Helper to avoid deep nesting in the top-level method."""
provider_mapping = None
@@ -3621,10 +3881,12 @@ class ComputeManager(manager.Manager):
provider_mapping = self._get_request_group_mapping(request_spec)
if provider_mapping:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
claim_context = rebuild_claim(
context, instance, scheduled_node, allocations,
@@ -3635,7 +3897,8 @@ class ComputeManager(manager.Manager):
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations,
- provider_mapping, accel_uuids)
+ provider_mapping, accel_uuids, reimage_boot_volume,
+ target_state)
@staticmethod
def _get_image_name(image_meta):
@@ -3649,10 +3912,18 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, request_group_resource_providers_mapping,
- accel_uuids):
+ accel_uuids, reimage_boot_volume, target_state):
orig_vm_state = instance.vm_state
if evacuate:
+ if target_state and orig_vm_state != vm_states.ERROR:
+ # This will ensure that the instance has the desired state
+ # at the destination.
+ if target_state not in vm_states.ALLOW_TARGET_STATES:
+ raise exception.InstanceEvacuateNotSupportedTargetState(
+ target_state=target_state)
+ orig_vm_state = target_state
+
if request_spec:
# NOTE(gibi): Do a late check of server group policy as
# parallel scheduling could violate such policy. This will
@@ -3754,8 +4025,23 @@ class ComputeManager(manager.Manager):
self._get_instance_block_device_info(
context, instance, bdms=bdms)
- def detach_block_devices(context, bdms):
+ def detach_block_devices(context, bdms, detach_root_bdm=True):
for bdm in bdms:
+ # Previously, the calls made to this method by the rebuild
+ # instance operation were for image-backed instances, which
+ # assumed we only had attached volumes and no root BDM.
+ # Now we need to handle the root BDM case, which we manage
+ # manually, so we skip the attachment create/delete calls
+ # for it here.
+ # The detach_root_bdm parameter is only set to False while
+ # rebuilding a volume-backed instance, so we don't have to
+ # worry about other callers as they won't satisfy this
+ # condition.
+ # In the evacuate case detach_root_bdm is always True since
+ # reimage_boot_volume is not passed there, so this branch is
+ # never taken.
+ if not detach_root_bdm and bdm.is_root:
+ continue
if bdm.is_volume:
# NOTE (ildikov): Having the attachment_id set in the BDM
# means that it's the new Cinder attach/detach flow
@@ -3791,7 +4077,8 @@ class ComputeManager(manager.Manager):
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
evacuate=evacuate,
- accel_uuids=accel_uuids)
+ accel_uuids=accel_uuids,
+ reimage_boot_volume=reimage_boot_volume)
try:
with instance.mutated_migration_context():
self.driver.rebuild(**kwargs)
@@ -5231,10 +5518,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -5337,7 +5626,7 @@ class ComputeManager(manager.Manager):
clean_shutdown)
except exception.BuildAbortException:
# NOTE(gibi): We failed
- # update_pci_request_spec_with_allocated_interface_name so
+ # update_pci_request_with_placement_allocations so
# there is no reason to re-schedule. Just revert the allocation
# and fail the migration.
with excutils.save_and_reraise_exception():
@@ -5468,7 +5757,7 @@ class ComputeManager(manager.Manager):
'host (%s).', self.host, instance=instance)
self._send_prep_resize_notifications(
ctxt, instance, fields.NotificationPhase.START, flavor)
- # TODO(mriedem): update_pci_request_spec_with_allocated_interface_name
+ # TODO(mriedem): update_pci_request_with_placement_allocations
# should be called here if the request spec has request group mappings,
# e.g. for things like QoS ports with resource requests. Do it outside
# the try/except so if it raises BuildAbortException we do not attempt
@@ -6715,12 +7004,12 @@ class ComputeManager(manager.Manager):
try:
if provider_mappings:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, instance.pci_requests.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mappings,
+ )
accel_info = []
if accel_uuids:
@@ -7700,10 +7989,10 @@ class ComputeManager(manager.Manager):
if not pci_reqs.requests:
return None
- devices = self.rt.claim_pci_devices(
- context, pci_reqs, instance.numa_topology)
-
- if not devices:
+ try:
+ devices = self.rt.claim_pci_devices(
+ context, pci_reqs, instance.numa_topology)
+ except exception.PciDeviceRequestFailed:
LOG.info('Failed to claim PCI devices during interface attach '
'for PCI request %s', pci_reqs, instance=instance)
raise exception.InterfaceAttachPciClaimFailed(
@@ -7800,12 +8089,12 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid) from e
try:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, pci_reqs.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ pci_reqs.requests,
+ provider_mappings,
+ )
except (
exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
@@ -8270,7 +8559,7 @@ class ComputeManager(manager.Manager):
action=fields.NotificationAction.LIVE_MIGRATION_PRE,
phase=fields.NotificationPhase.START, bdms=bdms)
- connector = self.driver.get_volume_connector(instance)
+ connector = None
try:
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id is not None:
@@ -8284,6 +8573,8 @@ class ComputeManager(manager.Manager):
#
# Also note that attachment_update is not needed as we
# are providing the connector in the create call.
+ if connector is None:
+ connector = self.driver.get_volume_connector(instance)
attach_ref = self.volume_api.attachment_create(
context, bdm.volume_id, bdm.instance_uuid,
connector=connector, mountpoint=bdm.device_name)
@@ -8576,8 +8867,9 @@ class ComputeManager(manager.Manager):
# host attachment. We fetch BDMs before that to retain connection_info
# and attachment_id relating to the source host for post migration
# cleanup.
- post_live_migration = functools.partial(self._post_live_migration,
- source_bdms=source_bdms)
+ post_live_migration = functools.partial(
+ self._post_live_migration_update_host, source_bdms=source_bdms
+ )
rollback_live_migration = functools.partial(
self._rollback_live_migration, source_bdms=source_bdms)
@@ -8809,7 +9101,7 @@ class ComputeManager(manager.Manager):
volumes with connection_info set for the source host
"""
# Detaching volumes.
- connector = self.driver.get_volume_connector(instance)
+ connector = None
for bdm in source_bdms:
if bdm.is_volume:
# Detaching volumes is a call to an external API that can fail.
@@ -8829,6 +9121,9 @@ class ComputeManager(manager.Manager):
# remove the volume connection without detaching from
# hypervisor because the instance is not running
# anymore on the current host
+ if connector is None:
+ connector = self.driver.get_volume_connector(
+ instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
@@ -8850,6 +9145,42 @@ class ComputeManager(manager.Manager):
bdm.attachment_id, self.host,
str(e), instance=instance)
+ # TODO(sean-k-mooney): add typing
+ def _post_live_migration_update_host(
+ self, ctxt, instance, dest, block_migration=False,
+ migrate_data=None, source_bdms=None
+ ):
+ try:
+ self._post_live_migration(
+ ctxt, instance, dest, block_migration, migrate_data,
+ source_bdms)
+ except Exception:
+ # Restore the instance object
+ node_name = None
+ try:
+ # get the node name of the compute host where the instance
+ # will be running after the migration, i.e. the destination
+ # host
+ compute_node = self._get_compute_info(ctxt, dest)
+ node_name = compute_node.hypervisor_hostname
+ except exception.ComputeHostNotFound:
+ LOG.exception('Failed to get compute_info for %s', dest)
+
+ # we can never roll back from post live migration and we can
+ # only get here if the instance is running on the dest, so we
+ # ensure instance.host is set correctly and reraise the
+ # original exception unmodified.
+ if instance.host != dest:
+ # apply saves the new fields while drop actually removes the
+ # migration context from the instance, so migration persists.
+ instance.apply_migration_context()
+ instance.drop_migration_context()
+ instance.host = dest
+ instance.task_state = None
+ instance.node = node_name
+ instance.progress = 0
+ instance.save()
+ raise
+
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance, dest,
@@ -8861,7 +9192,7 @@ class ComputeManager(manager.Manager):
and mainly updating database record.
:param ctxt: security context
- :param instance: instance dict
+ :param instance: instance object
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
@@ -10139,6 +10470,27 @@ class ComputeManager(manager.Manager):
# (e.g. disable the service).
with excutils.save_and_reraise_exception():
LOG.exception("ReshapeNeeded exception is unexpected here!")
+ except exception.PlacementPciException:
+ # If we are at startup and the Placement PCI inventory handling
+ # failed then probably there is a configuration error. Propagate
+ # the error up to kill the service.
+ if startup:
+ raise
+ # If we are not at startup then we can assume that the
+ # configuration was correct at startup so the error is probably
+ # transient. Anyhow we cannot kill the service any more so just
+ # log the error and continue.
+ LOG.exception(
+ "Error updating PCI resources for node %(node)s.",
+ {'node': nodename})
+ except exception.InvalidConfiguration as e:
+ if startup:
+ # If this happens during startup, we need to let it raise to
+ # abort our service startup.
+ raise
+ else:
+ LOG.error("Error updating resources for node %s: %s",
+ nodename, e)
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
@@ -11055,7 +11407,7 @@ class _ComputeV5Proxy(object):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
- accel_uuids)
+ accel_uuids, False, None)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
diff --git a/nova/compute/pci_placement_translator.py b/nova/compute/pci_placement_translator.py
new file mode 100644
index 0000000000..016efd9122
--- /dev/null
+++ b/nova/compute/pci_placement_translator.py
@@ -0,0 +1,623 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import collections
+import copy
+import typing as ty
+
+import os_resource_classes
+import os_traits
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
+from nova.compute import provider_tree
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova.objects import fields
+from nova.objects import pci_device
+from nova.pci import devspec
+from nova.pci import manager as pci_manager
+
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+# Devs with this type are in one to one mapping with an RP in placement
+PARENT_TYPES = (
+ fields.PciDeviceType.STANDARD, fields.PciDeviceType.SRIOV_PF)
+# Devs with these type need to have a parent and that parent is the one
+# that mapped to a placement RP
+CHILD_TYPES = (
+ fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA)
+
+
+def _is_placement_tracking_enabled() -> bool:
+ return CONF.pci.report_in_placement
+
+
+def _normalize_traits(traits: ty.List[str]) -> ty.List[str]:
+ """Make the trait names acceptable for placement.
+
+ It keeps the already valid standard or custom traits but normalizes trait
+ names that are not already normalized.
+ """
+ standard_traits, rest = os_traits.check_traits(traits)
+ custom_traits = []
+ for name in rest:
+ name = name.upper()
+ if os_traits.is_custom(name):
+ custom_traits.append(name)
+ else:
+ custom_traits.append(os_traits.normalize_name(name))
+
+ return list(standard_traits) + custom_traits
+
+
+def get_traits(traits_str: str) -> ty.Set[str]:
+ """Return a normalized set of placement standard and custom traits from
+ a string of comma separated trait names.
+ """
+ # traits is a comma separated list of placement trait names
+ if not traits_str:
+ return set()
+ return set(_normalize_traits(traits_str.split(',')))
+
+
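A quick sketch outside the patch of how get_traits() normalizes a hypothetical device spec trait tag: standard os-traits names pass through, CUSTOM_ names are kept, anything else becomes a CUSTOM_ trait.

    get_traits("HW_CPU_X86_AVX,custom_foo,gold")
    # -> {'HW_CPU_X86_AVX', 'CUSTOM_FOO', 'CUSTOM_GOLD'}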
+def _get_traits_for_dev(
+ dev_spec_tags: ty.Dict[str, str],
+) -> ty.Set[str]:
+ return get_traits(dev_spec_tags.get("traits", "")) | {
+ os_traits.COMPUTE_MANAGED_PCI_DEVICE
+ }
+
+
+def _normalize_resource_class(rc: str) -> str:
+ rc = rc.upper()
+ if (
+ rc not in os_resource_classes.STANDARDS and
+ not os_resource_classes.is_custom(rc)
+ ):
+ rc = os_resource_classes.normalize_name(rc)
+ # mypy: normalize_name will return non None for non None input
+ assert rc
+
+ return rc
+
+
+def get_resource_class(
+ requested_name: ty.Optional[str], vendor_id: str, product_id: str
+) -> str:
+ """Return the normalized resource class name based on what is requested
+ or if nothing is requested then generated from the vendor_id and product_id
+ """
+ if requested_name:
+ rc = _normalize_resource_class(requested_name)
+ else:
+ rc = f"CUSTOM_PCI_{vendor_id}_{product_id}".upper()
+ return rc
+
+
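A sketch outside the patch, with hypothetical vendor/product IDs, of the three cases handled by get_resource_class():

    get_resource_class(None, "8086", "1563")              # 'CUSTOM_PCI_8086_1563'
    get_resource_class("gpu", "10de", "2204")              # 'CUSTOM_GPU'
    get_resource_class("CUSTOM_FAST_VF", "15b3", "101e")   # 'CUSTOM_FAST_VF'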
+def _get_rc_for_dev(
+ dev: pci_device.PciDevice,
+ dev_spec_tags: ty.Dict[str, str],
+) -> str:
+ """Return the resource class to represent the device.
+
+ It is either provided by the user in the configuration as the
+ resource_class tag, or we are generating one from vendor_id and product_id.
+
+ The user specified resource class is normalized if it is not already an
+ acceptable standard or custom resource class.
+ """
+ rc = dev_spec_tags.get("resource_class")
+ return get_resource_class(rc, dev.vendor_id, dev.product_id)
+
+
+class PciResourceProvider:
+ """A PCI Resource Provider"""
+
+ def __init__(self, name: str) -> None:
+ self.name = name
+ self.parent_dev = None
+ self.children_devs: ty.List[pci_device.PciDevice] = []
+ self.resource_class: ty.Optional[str] = None
+ self.traits: ty.Optional[ty.Set[str]] = None
+
+ @property
+ def devs(self) -> ty.List[pci_device.PciDevice]:
+ return [self.parent_dev] if self.parent_dev else self.children_devs
+
+ @property
+ def to_be_deleted(self):
+ return not bool(self.devs)
+
+ def add_child(self, dev, dev_spec_tags: ty.Dict[str, str]) -> None:
+ if self.parent_dev:
+ raise exception.PlacementPciDependentDeviceException(
+ parent_dev=dev.address,
+ children_devs=",".join(dev.address for dev in self.devs)
+ )
+
+ rc = _get_rc_for_dev(dev, dev_spec_tags)
+ if self.resource_class and rc != self.resource_class:
+ raise exception.PlacementPciMixedResourceClassException(
+ new_rc=rc,
+ new_dev=dev.address,
+ current_rc=self.resource_class,
+ current_devs=",".join(
+ dev.address for dev in self.children_devs)
+ )
+
+ traits = _get_traits_for_dev(dev_spec_tags)
+ if self.traits is not None and self.traits != traits:
+ raise exception.PlacementPciMixedTraitsException(
+ new_traits=",".join(sorted(traits)),
+ new_dev=dev.address,
+ current_traits=",".join(sorted(self.traits)),
+ current_devs=",".join(
+ dev.address for dev in self.children_devs),
+ )
+
+ self.children_devs.append(dev)
+ self.resource_class = rc
+ self.traits = traits
+
+ def add_parent(self, dev, dev_spec_tags: ty.Dict[str, str]) -> None:
+ if self.parent_dev or self.children_devs:
+ raise exception.PlacementPciDependentDeviceException(
+ parent_dev=dev.address,
+ children_devs=",".join(dev.address for dev in self.devs)
+ )
+
+ self.parent_dev = dev
+ self.resource_class = _get_rc_for_dev(dev, dev_spec_tags)
+ self.traits = _get_traits_for_dev(dev_spec_tags)
+
+ def remove_child(self, dev: pci_device.PciDevice) -> None:
+ # Nothing to do here. The update_provider_tree will handle the
+ # inventory decrease or the full RP removal
+ pass
+
+ def remove_parent(self, dev: pci_device.PciDevice) -> None:
+ # Nothing to do here. The update_provider_tree call will handle
+ # the full RP removal.
+ pass
+
+ def _get_allocations(self) -> ty.Mapping[str, int]:
+ """Return a dict of used resources keyed by consumer UUID.
+
+ Note that:
+ 1) a single consumer can consume more than one resource from a single
+ RP. I.e. A VM with two VFs from the same parent PF
+ 2) multiple consumers can consume resources from a single RP. I.e. two
+ VMs consuming one VF from the same PF each
+ 3) regardless of how many consumers we have on a single PCI RP, they
+ are always consuming resources from the same resource class as
+ we are not supporting dependent devices modelled by the same RP but
+ different resource classes.
+ """
+ return collections.Counter(
+ [
+ dev.instance_uuid
+ for dev in self.devs
+ if "instance_uuid" in dev and dev.instance_uuid
+ ]
+ )
+
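The Counter above yields the per-consumer usage counts; a sketch outside the patch with hypothetical instance names:

    import collections
    collections.Counter(['instance-A', 'instance-A', 'instance-B'])
    # Counter({'instance-A': 2, 'instance-B': 1})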
+ def update_provider_tree(
+ self,
+ provider_tree: provider_tree.ProviderTree,
+ parent_rp_name: str,
+ ) -> None:
+
+ if self.to_be_deleted:
+ # This means we need to delete the RP from placement if exists
+ if provider_tree.exists(self.name):
+ # NOTE(gibi): If there are allocations on this RP then
+ # Placement will reject the update when the provider_tree
+ # is synced up.
+ provider_tree.remove(self.name)
+
+ return
+
+ if not provider_tree.exists(self.name):
+ # NOTE(gibi): We need to generate UUID for the new provider in Nova
+ # instead of letting Placement assign one. We are potentially
+ # healing a missing RP along with missing allocations on that RP.
+ # The allocation healing happens with POST /reshape, and that API
+ # only takes RP UUIDs.
+ provider_tree.new_child(
+ self.name,
+ parent_rp_name,
+ uuid=uuidutils.generate_uuid(dashed=True)
+ )
+
+ provider_tree.update_inventory(
+ self.name,
+ # NOTE(gibi): The rest of the inventory fields (reserved,
+ # allocation_ratio, etc.) are defaulted by placement and the
+ # default value make sense for PCI devices, i.e. no overallocation
+ # and PCI can be allocated one by one.
+ # Also, this way if the operator sets the reserved value directly
+ # in placement for the PCI inventories then nova will not override
+ # that value periodically.
+ {
+ self.resource_class: {
+ "total": len(self.devs),
+ "max_unit": len(self.devs),
+ }
+ },
+ )
+ provider_tree.update_traits(self.name, self.traits)
+
+ # Here we are sure the RP exists in the provider_tree. So, we can
+ # record the RP UUID in each PciDevice this RP represents
+ rp_uuid = provider_tree.data(self.name).uuid
+ for dev in self.devs:
+ dev.extra_info['rp_uuid'] = rp_uuid
+
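For illustration, outside the patch and with a hypothetical device spec: a PF RP representing four VFs tagged with resource_class=CUSTOM_FAST_VF and no extra traits ends up reported as

    inventory = {'CUSTOM_FAST_VF': {'total': 4, 'max_unit': 4}}
    traits = {'COMPUTE_MANAGED_PCI_DEVICE'}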
+ def update_allocations(
+ self,
+ allocations: dict,
+ provider_tree: provider_tree.ProviderTree,
+ same_host_instances: ty.List[str],
+ ) -> bool:
+ updated = False
+
+ if self.to_be_deleted:
+ # the RP is going away because either removed from the hypervisor
+ # or the compute's config is changed to ignore the device.
+ return updated
+
+ # we assume here that if this RP has been created in the current round
+ # of healing then it already has a UUID assigned.
+ rp_uuid = provider_tree.data(self.name).uuid
+
+ for consumer, amount in self._get_allocations().items():
+ if consumer not in allocations:
+ # We have PCI device(s) allocated to an instance, but we don't
+ # see any instance allocation in placement. This
+ # happens for two reasons:
+ # 1) The instance is being migrated and therefore the
+ # allocation is held by the migration UUID in placement. In
+ # this case the PciDevice is still allocated to the instance
+ # UUID in the nova DB hence our lookup for the instance
+ # allocation here. We can ignore this case as: i) We healed
+ # the PCI allocation for the instance before the migration
+ # was started. ii) Nova simply moves the allocation from the
+ # instance UUID to the migration UUID in placement. So we
+ # assume the migration allocation is correct without
+ # healing. One limitation of this is that if there is in
+ # progress migration when nova is upgraded, then the PCI
+ # allocation of that migration will be missing from
+ # placement on the source host. But it is temporary and the
+ # allocation will be fixed as soon as the migration is
+ # completed or reverted.
+ # 2) We have a bug in the scheduler or placement and the whole
+ # instance allocation is lost. We cannot handle that here.
+ # It is expected to be healed via nova-manage placement
+ # heal_allocation CLI instead.
+ continue
+
+ if consumer in same_host_instances:
+ # This is a nasty special case. This instance is undergoing
+ # a same host resize. So in Placement the source host
+ # allocation is held by the migration UUID *but* the
+ # PciDevice.instance_uuid is set for the instance UUID both
+ # on the source and on the destination host. As the source and
+ # dest are the same for migration we will see PciDevice
+ # objects assigned to this instance that should not be
+ # allocated to the instance UUID in placement.
+ # As noted above we don't want to take care of in-progress
+ # migrations during healing. So we simply ignore this instance.
+ # If the instance needs healing then it will be healed after
+ # the migration is confirmed or reverted.
+ continue
+
+ current_allocs = allocations[consumer]['allocations']
+ current_rp_allocs = current_allocs.get(rp_uuid)
+
+ if current_rp_allocs:
+ # update an existing allocation if the current one differs
+ current_rc_allocs = current_rp_allocs["resources"].get(
+ self.resource_class, 0)
+ if current_rc_allocs != amount:
+ current_rp_allocs[
+ "resources"][self.resource_class] = amount
+ updated = True
+ else:
+ # insert a new allocation as it is missing
+ current_allocs[rp_uuid] = {
+ "resources": {self.resource_class: amount}
+ }
+ updated = True
+
+ return updated
+
+ def __str__(self) -> str:
+ if self.devs:
+ return (
+ f"RP({self.name}, {self.resource_class}={len(self.devs)}, "
+ f"traits={','.join(sorted(self.traits or set()))})"
+ )
+ else:
+ return f"RP({self.name}, <EMPTY>)"
+
+
+class PlacementView:
+ """The PCI Placement view"""
+
+ def __init__(
+ self,
+ hypervisor_hostname: str,
+ instances_under_same_host_resize: ty.List[str],
+ ) -> None:
+ self.rps: ty.Dict[str, PciResourceProvider] = {}
+ self.root_rp_name = hypervisor_hostname
+ self.same_host_instances = instances_under_same_host_resize
+
+ def _get_rp_name_for_address(self, addr: str) -> str:
+ return f"{self.root_rp_name}_{addr.upper()}"
+
+ def _ensure_rp(self, rp_name: str) -> PciResourceProvider:
+ return self.rps.setdefault(rp_name, PciResourceProvider(rp_name))
+
+ def _get_rp_name_for_child(self, dev: pci_device.PciDevice) -> str:
+ if not dev.parent_addr:
+ msg = _(
+ "Missing parent address for PCI device s(dev)% with "
+ "type s(type)s"
+ ) % {
+ "dev": dev.address,
+ "type": dev.dev_type,
+ }
+ raise exception.PlacementPciException(error=msg)
+
+ return self._get_rp_name_for_address(dev.parent_addr)
+
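RP names, per the helpers above, are the hypervisor hostname joined with the upper-cased PCI address of the device (or of its parent for VFs); a sketch outside the patch with hypothetical values:

    view = PlacementView('compute-0', instances_under_same_host_resize=[])
    view._get_rp_name_for_address('0000:81:0a.2')
    # 'compute-0_0000:81:0A.2'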
+ def _add_child(
+ self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str]
+ ) -> None:
+ rp_name = self._get_rp_name_for_child(dev)
+ self._ensure_rp(rp_name).add_child(dev, dev_spec_tags)
+
+ def _add_parent(
+ self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str]
+ ) -> None:
+ rp_name = self._get_rp_name_for_address(dev.address)
+ self._ensure_rp(rp_name).add_parent(dev, dev_spec_tags)
+
+ def _add_dev(
+ self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str]
+ ) -> None:
+ if dev_spec_tags.get("physical_network"):
+ # NOTE(gibi): We ignore devices that have a physnet configured as
+ # those are there for Neutron-based SR-IOV and that is out of scope
+ # for now. Later these devices will be tracked as PCI_NETDEV
+ # devices in placement.
+ return
+
+ if dev.dev_type in PARENT_TYPES:
+ self._add_parent(dev, dev_spec_tags)
+ elif dev.dev_type in CHILD_TYPES:
+ self._add_child(dev, dev_spec_tags)
+ else:
+ msg = _(
+ "Unhandled PCI device type %(type)s for %(dev)s. Please "
+ "report a bug."
+ ) % {
+ "type": dev.dev_type,
+ "dev": dev.address,
+ }
+ raise exception.PlacementPciException(error=msg)
+
+ def _remove_child(self, dev: pci_device.PciDevice) -> None:
+ rp_name = self._get_rp_name_for_child(dev)
+ self._ensure_rp(rp_name).remove_child(dev)
+
+ def _remove_parent(self, dev: pci_device.PciDevice) -> None:
+ rp_name = self._get_rp_name_for_address(dev.address)
+ self._ensure_rp(rp_name).remove_parent(dev)
+
+ def _remove_dev(self, dev: pci_device.PciDevice) -> None:
+ """Remove PCI devices from Placement that existed before but now
+ deleted from the hypervisor or unlisted from [pci]device_spec
+ """
+ if dev.dev_type in PARENT_TYPES:
+ self._remove_parent(dev)
+ elif dev.dev_type in CHILD_TYPES:
+ self._remove_child(dev)
+
+ def process_dev(
+ self,
+ dev: pci_device.PciDevice,
+ dev_spec: ty.Optional[devspec.PciDeviceSpec],
+ ) -> None:
+
+ if dev.status in (
+ fields.PciDeviceStatus.DELETED,
+ fields.PciDeviceStatus.REMOVED,
+ ):
+ # If the PCI tracker marked the device DELETED or REMOVED then
+ # such device is not allocated, so we are free to drop it from
+ # placement too.
+ self._remove_dev(dev)
+ else:
+ if not dev_spec:
+ if dev.instance_uuid:
+ LOG.warning(
+ "Device spec is not found for device %s in "
+ "[pci]device_spec. We are skipping this devices "
+ "during Placement update. The device is allocated by "
+ "%s. You should not remove an allocated device from "
+ "the configuration. Please restore the configuration "
+ "or cold migrate the instance to resolve the "
+ "inconsistency.",
+ dev.address,
+ dev.instance_uuid
+ )
+ else:
+ LOG.warning(
+ "Device spec is not found for device %s in "
+ "[pci]device_spec. Ignoring device in Placement "
+ "resource view. This should not happen. Please file a "
+ "bug.",
+ dev.address
+ )
+
+ return
+
+ self._add_dev(dev, dev_spec.get_tags())
+
+ def __str__(self) -> str:
+ return (
+ f"Placement PCI view on {self.root_rp_name}: "
+ f"{', '.join(str(rp) for rp in self.rps.values())}"
+ )
+
+ def update_provider_tree(
+ self, provider_tree: provider_tree.ProviderTree
+ ) -> None:
+ for rp_name, rp in self.rps.items():
+ rp.update_provider_tree(provider_tree, self.root_rp_name)
+
+ def update_allocations(
+ self,
+ allocations: dict,
+ provider_tree: provider_tree.ProviderTree
+ ) -> bool:
+ """Updates the passed in allocations dict inplace with any PCI
+ allocations that is inferred from the PciDevice objects already added
+ to the view. It returns True if the allocations dict has been changed,
+ False otherwise.
+ """
+ updated = False
+ for rp in self.rps.values():
+ updated |= rp.update_allocations(
+ allocations,
+ provider_tree,
+ self.same_host_instances,
+ )
+ return updated
+
+
+def ensure_no_dev_spec_with_devname(dev_specs: ty.List[devspec.PciDeviceSpec]):
+ for dev_spec in dev_specs:
+ if dev_spec.dev_spec_conf.get("devname"):
+ msg = _(
+ "Invalid [pci]device_spec configuration. PCI Placement "
+ "reporting does not support 'devname' based device "
+ "specification but we got %(dev_spec)s. "
+ "Please use PCI address in the configuration instead."
+ ) % {"dev_spec": dev_spec.dev_spec_conf}
+ raise exception.PlacementPciException(error=msg)
+
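A sketch outside the patch with hypothetical dev_spec_conf dicts: the first would be rejected by ensure_no_dev_spec_with_devname(), the second passes because the device is identified by PCI address.

    rejected = {'devname': 'ens2f0', 'physical_network': 'physnet0'}
    accepted = {'address': '0000:81:00.0', 'resource_class': 'CUSTOM_FAST_VF'}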
+
+def ensure_tracking_was_not_enabled_before(
+ provider_tree: provider_tree.ProviderTree
+) -> None:
+ # If placement tracking was enabled before then we do not support
+ # disabling it later. To check for that we can look for RPs with
+ # the COMPUTE_MANAGED_PCI_DEVICE trait. If we find any then we
+ # raise to kill the service.
+ for rp_uuid in provider_tree.get_provider_uuids():
+ if (
+ os_traits.COMPUTE_MANAGED_PCI_DEVICE
+ in provider_tree.data(rp_uuid).traits
+ ):
+ msg = _(
+ "The [pci]report_in_placement is False but it was enabled "
+ "before on this compute. Nova does not support disabling "
+ "it after it is enabled."
+ )
+ raise exception.PlacementPciException(error=msg)
+
+
+def update_provider_tree_for_pci(
+ provider_tree: provider_tree.ProviderTree,
+ nodename: str,
+ pci_tracker: pci_manager.PciDevTracker,
+ allocations: dict,
+ instances_under_same_host_resize: ty.List[str],
+) -> bool:
+ """Based on the PciDevice objects in the pci_tracker it calculates what
+ inventories and allocations needs to exist in placement and create the
+ missing peaces.
+
+ It returns True if not just the provider_tree but also allocations needed
+ to be changed.
+
+ :param allocations:
+ Dict of allocation data of the form:
+ { $CONSUMER_UUID: {
+ # The shape of each "allocations" dict below is identical
+ # to the return from GET /allocations/{consumer_uuid}
+ "allocations": {
+ $RP_UUID: {
+ "generation": $RP_GEN,
+ "resources": {
+ $RESOURCE_CLASS: $AMOUNT,
+ ...
+ },
+ },
+ ...
+ },
+ "project_id": $PROJ_ID,
+ "user_id": $USER_ID,
+ "consumer_generation": $CONSUMER_GEN,
+ },
+ ...
+ }
+ :param instances_under_same_host_resize: A list of instance UUIDs that
+ are undergoing same host resize on this host.
+ """
+ if not _is_placement_tracking_enabled():
+ ensure_tracking_was_not_enabled_before(provider_tree)
+ # If tracking is not enabled we just return without touching anything
+ return False
+
+ ensure_no_dev_spec_with_devname(pci_tracker.dev_filter.specs)
+
+ LOG.debug(
+ 'Collecting PCI inventories and allocations to track them in Placement'
+ )
+
+ pv = PlacementView(nodename, instances_under_same_host_resize)
+ for dev in pci_tracker.pci_devs:
+ # match the PCI device with the [pci]device_spec config to access
+ # the configuration metadata tags
+ dev_spec = pci_tracker.dev_filter.get_devspec(dev)
+ pv.process_dev(dev, dev_spec)
+
+ LOG.info("Placement PCI resource view: %s", pv)
+
+ pv.update_provider_tree(provider_tree)
+ old_alloc = copy.deepcopy(allocations)
+ # update_provider_tree correlated the PciDevice objects with RPs in
+ # placement and recorded the RP UUID in the PciDevice object. We need to
+ # trigger an update on the device pools in the tracker to get the device
+ # RP UUID mapped to the device pools
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices()
+ updated = pv.update_allocations(allocations, provider_tree)
+
+ if updated:
+ LOG.debug(
+ "Placement PCI view needs allocation healing. This should only "
+ "happen if [filter_scheduler]pci_in_placement is still disabled. "
+ "Original allocations: %s New allocations: %s",
+ old_alloc,
+ allocations,
+ )
+
+ return updated
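For reference, a hypothetical allocations argument (sketched outside the patch) matching the shape documented in update_provider_tree_for_pci() above:

    allocations = {
        'INSTANCE_UUID': {
            'allocations': {
                'PCI_RP_UUID': {
                    'generation': 42,
                    'resources': {'CUSTOM_PCI_8086_1563': 1},
                },
            },
            'project_id': 'PROJECT_ID',
            'user_id': 'USER_ID',
            'consumer_generation': 3,
        },
    }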
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 0b801f7ddf..3f911f3708 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -30,6 +30,7 @@ import retrying
from nova.compute import claims
from nova.compute import monitors
+from nova.compute import pci_placement_translator
from nova.compute import provider_config
from nova.compute import stats as compute_stats
from nova.compute import task_states
@@ -48,6 +49,7 @@ from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
+from nova.virt import node
CONF = nova.conf.CONF
@@ -103,7 +105,7 @@ class ResourceTracker(object):
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
- self.reportclient = reportclient or report.SchedulerReportClient()
+ self.reportclient = reportclient or report.report_client_singleton()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
@@ -618,18 +620,11 @@ class ResourceTracker(object):
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
- # Remove usage for an instance that is tracked in migrations, such as
- # on the dest node during revert resize.
- if instance['uuid'] in self.tracked_migrations:
- migration = self.tracked_migrations.pop(instance['uuid'])
+ if instance["uuid"] in self.tracked_migrations:
if not flavor:
- flavor = self._get_flavor(instance, prefix, migration)
- # Remove usage for an instance that is not tracked in migrations (such
- # as on the source node after a migration).
- # NOTE(lbeliveau): On resize on the same node, the instance is
- # included in both tracked_migrations and tracked_instances.
- elif instance['uuid'] in self.tracked_instances:
- self.tracked_instances.remove(instance['uuid'])
+ flavor = self._get_flavor(
+ instance, prefix, self.tracked_migrations[instance["uuid"]]
+ )
if flavor is not None:
numa_topology = self._get_migration_context_resource(
@@ -645,6 +640,15 @@ class ResourceTracker(object):
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
+ # Remove usage for an instance that is tracked in migrations, such as
+ # on the dest node during revert resize.
+ self.tracked_migrations.pop(instance['uuid'], None)
+ # Remove usage for an instance that is not tracked in migrations (such
+ # as on the source node after a migration).
+ # NOTE(lbeliveau): On resize on the same node, the instance is
+ # included in both tracked_migrations and tracked_instances.
+ self.tracked_instances.discard(instance['uuid'])
+
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
@@ -665,50 +669,6 @@ class ResourceTracker(object):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
- def _check_for_nodes_rebalance(self, context, resources, nodename):
- """Check if nodes rebalance has happened.
-
- The ironic driver maintains a hash ring mapping bare metal nodes
- to compute nodes. If a compute dies, the hash ring is rebuilt, and
- some of its bare metal nodes (more precisely, those not in ACTIVE
- state) are assigned to other computes.
-
- This method checks for this condition and adjusts the database
- accordingly.
-
- :param context: security context
- :param resources: initial values
- :param nodename: node name
- :returns: True if a suitable compute node record was found, else False
- """
- if not self.driver.rebalances_nodes:
- return False
-
- # Its possible ironic just did a node re-balance, so let's
- # check if there is a compute node that already has the correct
- # hypervisor_hostname. We can re-use that rather than create a
- # new one and have to move existing placement allocations
- cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
- context, nodename)
-
- if len(cn_candidates) == 1:
- cn = cn_candidates[0]
- LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
- {"name": nodename, "old": cn.host, "new": self.host})
- cn.host = self.host
- self.compute_nodes[nodename] = cn
- self._copy_resources(cn, resources)
- self._setup_pci_tracker(context, cn, resources)
- self._update(context, cn)
- return True
- elif len(cn_candidates) > 1:
- LOG.error(
- "Found more than one ComputeNode for nodename %s. "
- "Please clean up the orphaned ComputeNode records in your DB.",
- nodename)
-
- return False
-
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
@@ -726,6 +686,7 @@ class ResourceTracker(object):
False otherwise
"""
nodename = resources['hypervisor_hostname']
+ node_uuid = resources['uuid']
# if there is already a compute node just use resources
# to initialize
@@ -737,23 +698,43 @@ class ResourceTracker(object):
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
- cn = self._get_compute_node(context, nodename)
+
+ # We use read_deleted='yes' so that we will find and recover a
+ # deleted node object, if necessary.
+ with utils.temporary_mutation(context, read_deleted='yes'):
+ cn = self._get_compute_node(context, node_uuid)
+ if cn and cn.deleted:
+ # Undelete and save this right now so that everything below
+ # can continue without read_deleted=yes
+ LOG.info('Undeleting compute node %s', cn.uuid)
+ cn.deleted = False
+ cn.deleted_at = None
+ cn.save()
if cn:
+ if cn.host != self.host:
+ LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
+ {"name": nodename, "old": cn.host, "new": self.host})
+ cn.host = self.host
+ self._update(context, cn)
+
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
- if self._check_for_nodes_rebalance(context, resources, nodename):
- return False
-
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
- cn.create()
+ try:
+ cn.create()
+ except exception.DuplicateRecord:
+ raise exception.InvalidConfiguration(
+ 'Duplicate compute node record found for host %s node %s' % (
+ cn.host, cn.hypervisor_hostname))
+
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
@@ -886,6 +867,14 @@ class ResourceTracker(object):
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
+ if 'uuid' not in resources:
+ # NOTE(danms): Any driver that does not provide a uuid per
+ # node gets the locally-persistent compute_id. Only ironic
+ # should be setting the per-node uuid (and returning
+ # multiple nodes in general). If this is the first time we
+ # are creating a compute node on this host, we will
+ # generate and persist this uuid for the future.
+ resources['uuid'] = node.get_local_node_uuid()
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
@@ -990,8 +979,6 @@ class ResourceTracker(object):
# notified when instances are deleted, we need remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations)
- dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
- cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
@@ -1013,14 +1000,13 @@ class ResourceTracker(object):
if startup:
self._check_resources(context)
- def _get_compute_node(self, context, nodename):
+ def _get_compute_node(self, context, node_uuid):
"""Returns compute node for the host and nodename."""
try:
- return objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, nodename)
+ return objects.ComputeNode.get_by_uuid(context, node_uuid)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
- {'host': self.host, 'node': nodename})
+ {'host': self.host, 'node': node_uuid})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
@@ -1194,9 +1180,16 @@ class ResourceTracker(object):
return list(traits)
- @retrying.retry(stop_max_attempt_number=4,
- retry_on_exception=lambda e: isinstance(
- e, exception.ResourceProviderUpdateConflict))
+ @retrying.retry(
+ stop_max_attempt_number=4,
+ retry_on_exception=lambda e: isinstance(
+ e,
+ (
+ exception.ResourceProviderUpdateConflict,
+ exception.PlacementReshapeConflict,
+ ),
+ ),
+ )
def _update_to_placement(self, context, compute_node, startup):
"""Send resource and inventory changes to placement."""
# NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
@@ -1216,7 +1209,9 @@ class ResourceTracker(object):
context, compute_node.uuid, name=compute_node.hypervisor_hostname)
# Let the virt driver rearrange the provider tree and set/update
# the inventory, traits, and aggregates throughout.
- allocs = None
+ allocs = self.reportclient.get_allocations_for_provider_tree(
+ context, nodename)
+ driver_reshaped = False
try:
self.driver.update_provider_tree(prov_tree, nodename)
except exception.ReshapeNeeded:
@@ -1227,10 +1222,9 @@ class ResourceTracker(object):
LOG.info("Performing resource provider inventory and "
"allocation data migration during compute service "
"startup or fast-forward upgrade.")
- allocs = self.reportclient.get_allocations_for_provider_tree(
- context, nodename)
- self.driver.update_provider_tree(prov_tree, nodename,
- allocations=allocs)
+ self.driver.update_provider_tree(
+ prov_tree, nodename, allocations=allocs)
+ driver_reshaped = True
# Inject driver capabilities traits into the provider
# tree. We need to determine the traits that the virt
@@ -1251,25 +1245,77 @@ class ResourceTracker(object):
context, nodename, provider_tree=prov_tree)
prov_tree.update_traits(nodename, traits)
+ instances_under_same_host_resize = [
+ migration.instance_uuid
+ for migration in self.tracked_migrations.values()
+ if migration.is_same_host_resize
+ ]
+ # NOTE(gibi): Tracking PCI in placement is different from other
+ # resources.
+ #
+ # While driver.update_provider_tree is used to let the virt driver
+ # create any kind of placement model for a resource, the PCI data
+ # modelling is done by the PCI tracker independently of the virt
+ # driver. So the placement reporting also needs to be done here in
+ # the resource tracker, independently of the virt driver.
+ #
+ # Additionally, when PCI tracking in placement was introduced there
+ # were already PCI allocations in nova, so both the PCI inventories
+ # and allocations need to be healed. Moreover, to support rolling
+ # upgrades the placement prefilter for PCI devices was not turned on
+ # by default in the first release of this feature. Therefore, there
+ # could be new PCI allocations without placement being involved
+ # until the prefilter is enabled. So we need to be ready to heal PCI
+ # allocations at every call, not just at startup.
+ pci_reshaped = pci_placement_translator.update_provider_tree_for_pci(
+ prov_tree,
+ nodename,
+ self.pci_tracker,
+ allocs,
+ instances_under_same_host_resize,
+ )
+
self.provider_tree = prov_tree
# This merges in changes from the provider config files loaded in init
self._merge_provider_configs(self.provider_configs, prov_tree)
- # Flush any changes. If we processed ReshapeNeeded above, allocs is not
- # None, and this will hit placement's POST /reshaper route.
- self.reportclient.update_from_provider_tree(context, prov_tree,
- allocations=allocs)
+ try:
+ # Flush any changes. If we either processed ReshapeNeeded above or
+ # update_provider_tree_for_pci did reshape, then we need to pass
+ # allocs to update_from_provider_tree to hit placement's POST
+ # /reshaper route.
+ self.reportclient.update_from_provider_tree(
+ context,
+ prov_tree,
+ allocations=allocs if driver_reshaped or pci_reshaped else None
+ )
+ except exception.InventoryInUse as e:
+ # This means an inventory reconfiguration (e.g.: removing a parent
+ # PF and adding a VF under that parent) was not possible due to
+ # existing allocations. Translate the exception to prevent
+ # the compute service from starting.
+ raise exception.PlacementPciException(error=str(e))
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
+
+ self._update_to_placement(context, compute_node, startup)
+
+ if self.pci_tracker:
+ # sync PCI device pool state stored in the compute node with
+ # the actual state from the PCI tracker as we commit changes in
+ # the DB and in the PCI tracker below
+ dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
+ compute_node.pci_device_pools = dev_pools_obj
+
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB. Note that
- # _update_to_placement below does not supersede the need to do this
+ # _update_to_placement above does not supersede the need to do this
# because there are stats-related fields in the ComputeNode object
# which could have changed and still need to be reported to the
# scheduler filters/weighers (which could be out of tree as well).
@@ -1282,8 +1328,6 @@ class ResourceTracker(object):
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
- self._update_to_placement(context, compute_node, startup)
-
if self.pci_tracker:
self.pci_tracker.save(context)
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index fa5b0ee8d9..efc06300db 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -402,6 +402,8 @@ class ComputeAPI(object):
* ... - Rename the instance_type argument of prep_resize() to flavor
* ... - Rename the instance_type argument of resize_instance() to
flavor
+ * 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
+ * 6.2 - Add target_state parameter to rebuild_instance()
'''
VERSION_ALIASES = {
@@ -422,6 +424,8 @@ class ComputeAPI(object):
'wallaby': '6.0',
'xena': '6.0',
'yoga': '6.0',
+ 'zed': '6.1',
+ 'antelope': '6.2',
}
@property
@@ -1080,7 +1084,8 @@ class ComputeAPI(object):
self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate, on_shared_storage, host, node,
- preserve_ephemeral, migration, limits, request_spec, accel_uuids):
+ preserve_ephemeral, migration, limits, request_spec, accel_uuids,
+ reimage_boot_volume, target_state):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
@@ -1092,11 +1097,29 @@ class ComputeAPI(object):
'scheduled_node': node,
'limits': limits,
'request_spec': request_spec,
- 'accel_uuids': accel_uuids
+ 'accel_uuids': accel_uuids,
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
- version = self._ver(ctxt, '5.12')
+ version = '6.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
+ if msg_args['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance",
+ required="6.2")
+ else:
+ del msg_args['target_state']
+ version = '6.1'
+ if not client.can_send_version(version):
+ if msg_args['reimage_boot_volume']:
+ raise exception.NovaException(
+ 'Compute RPC version does not support '
+ 'reimage_boot_volume parameter.')
+ else:
+ del msg_args['reimage_boot_volume']
+ version = self._ver(ctxt, '5.12')
+ if not client.can_send_version(version):
del msg_args['accel_uuids']
version = '5.0'
cctxt = client.prepare(server=_compute_host(host, instance),
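The negotiation above follows a "drop the newest keyword and pin an older version" ladder, refusing to silently drop a parameter the caller actually set. A generic sketch outside the patch (not Nova code) of how the two new parameters are negotiated; the older accel_uuids fallback is handled separately in the real code:

    def negotiate(can_send_version, msg_args, ladder):
        # ladder: newest-first list of (version, kwarg_added_at_that_version)
        for version, kwarg in ladder:
            if can_send_version(version):
                return version, msg_args
            if kwarg and msg_args.get(kwarg):
                raise RuntimeError(
                    '%s requires RPC version %s' % (kwarg, version))
            msg_args.pop(kwarg, None)
        raise RuntimeError('no compatible RPC version found')

    # e.g. ladder = [('6.2', 'target_state'),
    #                ('6.1', 'reimage_boot_volume'),
    #                ('5.12', None)]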
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 036e69b7ce..30efc24fc7 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -1491,7 +1491,7 @@ def notify_about_instance_delete(notifier, context, instance,
phase=fields.NotificationPhase.END)
-def update_pci_request_spec_with_allocated_interface_name(
+def update_pci_request_with_placement_allocations(
context, report_client, pci_requests, provider_mapping):
"""Update the instance's PCI request based on the request group -
resource provider mapping and the device RP name from placement.
@@ -1512,12 +1512,33 @@ def update_pci_request_spec_with_allocated_interface_name(
if not pci_requests:
return
- def needs_update(pci_request, mapping):
+ def needs_update_due_to_qos(pci_request, mapping):
return (pci_request.requester_id and
pci_request.requester_id in mapping)
+ def get_group_mapping_for_flavor_based_pci_request(pci_request, mapping):
+ # NOTE(gibi): for flavor based PCI requests nova generates RequestGroup
+ # suffixes from InstancePCIRequests in the form of
+ # {request_id}-{count_index}
+ # NOTE(gibi): a suffixed request group is always fulfilled from
+ # a single RP
+ return {
+ group_id: rp_uuids[0]
+ for group_id, rp_uuids in mapping.items()
+ if group_id.startswith(pci_request.request_id)
+ }
+
for pci_request in pci_requests:
- if needs_update(pci_request, provider_mapping):
+ mapping = get_group_mapping_for_flavor_based_pci_request(
+ pci_request, provider_mapping)
+
+ if mapping:
+ for spec in pci_request.spec:
+ # FIXME(gibi): this is baaad but spec is a dict of strings so
+ # we need to serialize
+ spec['rp_uuids'] = ','.join(mapping.values())
+
+ elif needs_update_due_to_qos(pci_request, provider_mapping):
provider_uuids = provider_mapping[pci_request.requester_id]
if len(provider_uuids) != 1:
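As a sketch outside the patch with hypothetical IDs: a flavor-based PCI request with request_id 'R' asking for two devices yields the suffixed groups 'R-0' and 'R-1', while QoS port groups keyed by port ID are handled by the needs_update_due_to_qos() path instead.

    provider_mapping = {
        'R-0': ['RP_UUID_1'],
        'R-1': ['RP_UUID_2'],
        'PORT_UUID': ['RP_UUID_3'],
    }
    mapping = {
        group_id: rp_uuids[0]
        for group_id, rp_uuids in provider_mapping.items()
        if group_id.startswith('R')
    }
    # {'R-0': 'RP_UUID_1', 'R-1': 'RP_UUID_2'}; spec['rp_uuids'] then
    # becomes 'RP_UUID_1,RP_UUID_2'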
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 633894c1ea..1a916ea59a 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -76,3 +76,6 @@ ALLOW_TRIGGER_CRASH_DUMP = [ACTIVE, PAUSED, RESCUED, RESIZED, ERROR]
# states we allow resources to be freed in
ALLOW_RESOURCE_REMOVAL = [DELETED, SHELVED_OFFLOADED]
+
+# states we allow for evacuate instance
+ALLOW_TARGET_STATES = [STOPPED]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4d94b680a4..843c8ce3a3 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -144,7 +144,8 @@ class ComputeTaskAPI(object):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None,
- request_spec=None):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
@@ -157,7 +158,9 @@ class ComputeTaskAPI(object):
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host,
- request_spec=request_spec)
+ request_spec=request_spec,
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def cache_images(self, context, aggregate, image_ids):
"""Request images be pre-cached on hosts within an aggregate.
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index c6946a8de5..4b34b8339c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -21,6 +21,7 @@ import eventlet
import functools
import sys
+from keystoneauth1 import exceptions as ks_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_limit import exception as limit_exceptions
@@ -234,7 +235,7 @@ class ComputeTaskManager:
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.23')
+ target = messaging.Target(namespace='compute_task', version='1.25')
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -243,11 +244,42 @@ class ComputeTaskManager:
self.network_api = neutron.API()
self.servicegroup_api = servicegroup.API()
self.query_client = query.SchedulerQueryClient()
- self.report_client = report.SchedulerReportClient()
self.notifier = rpc.get_notifier('compute')
# Help us to record host in EventReporter
self.host = CONF.host
+ try:
+ # Test our placement client during initialization
+ self.report_client
+ except (ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure) as e:
+ # Non-fatal, likely transient (although not definitely);
+ # continue startup but log the warning so that when things
+ # fail later, it will be clear why we can not do certain
+ # things.
+ LOG.warning('Unable to initialize placement client (%s); '
+ 'Continuing with startup, but some operations '
+ 'will not be possible.', e)
+ except (ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized) as e:
+ # This is almost definitely fatal mis-configuration. The
+ # Unauthorized error might be transient, but it is
+ # probably reasonable to consider it fatal.
+ LOG.error('Fatal error initializing placement client; '
+ 'config is incorrect or incomplete: %s', e)
+ raise
+ except Exception as e:
+ # Unknown/unexpected errors here are fatal
+ LOG.error('Fatal error initializing placement client: %s', e)
+ raise
+
+ @property
+ def report_client(self):
+ return report.report_client_singleton()
+
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
@@ -1005,6 +1037,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+ # NOTE(gibi): as PCI devices are tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
@@ -1114,7 +1152,8 @@ class ComputeTaskManager:
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
- request_spec=None):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
@@ -1210,6 +1249,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+ # NOTE(gibi): as PCI devices are tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
try:
# if this is a rebuild of instance on the same host with
# new image.
@@ -1311,7 +1356,9 @@ class ComputeTaskManager:
node=node,
limits=limits,
request_spec=request_spec,
- accel_uuids=accel_uuids)
+ accel_uuids=accel_uuids,
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 03797bfff9..a5f0cf0094 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -286,6 +286,8 @@ class ComputeTaskAPI(object):
1.21 - Added cache_images()
1.22 - Added confirm_snapshot_based_resize()
1.23 - Added revert_snapshot_based_resize()
+ 1.24 - Add reimage_boot_volume parameter to rebuild_instance()
+ 1.25 - Add target_state parameter to rebuild_instance()
"""
def __init__(self):
@@ -426,8 +428,9 @@ class ComputeTaskAPI(object):
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
- preserve_ephemeral=False, request_spec=None):
- version = '1.12'
+ preserve_ephemeral=False, request_spec=None,
+ reimage_boot_volume=False, target_state=None):
+ version = '1.25'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
@@ -440,8 +443,25 @@ class ComputeTaskAPI(object):
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
if not self.client.can_send_version(version):
+ if kw['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance", required="1.25")
+ else:
+ del kw['target_state']
+ version = '1.24'
+ if not self.client.can_send_version(version):
+ if kw['reimage_boot_volume']:
+ raise exception.NovaException(
+ 'Conductor RPC version does not support '
+ 'reimage_boot_volume parameter.')
+ else:
+ del kw['reimage_boot_volume']
+ version = '1.12'
+ if not self.client.can_send_version(version):
version = '1.8'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
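The capping logic above follows a common oslo.messaging pattern: start at the newest known pin and step down, dropping a newer keyword argument only when the caller did not rely on it. A self-contained sketch of that pattern (hypothetical client class, not nova's RPC plumbing)::

    class VersionCappedClient:
        """Hypothetical stand-in that only knows its peer's maximum version."""

        def __init__(self, max_version):
            self.max_version = tuple(int(p) for p in max_version.split('.'))

        def can_send_version(self, version):
            return tuple(int(p) for p in version.split('.')) <= self.max_version


    def pick_version_and_kwargs(client, **kw):
        """Drop newer kwargs (or refuse the call) until the peer accepts."""
        version = '1.25'
        if not client.can_send_version(version):
            if kw.get('target_state'):
                raise RuntimeError('target_state needs conductor RPC >= 1.25')
            kw.pop('target_state', None)
            version = '1.24'
        if not client.can_send_version(version):
            if kw.get('reimage_boot_volume'):
                raise RuntimeError('reimage_boot_volume needs RPC >= 1.24')
            kw.pop('reimage_boot_volume', None)
            version = '1.12'
        return version, kw


    # An old peer pinned to 1.12 silently drops the new, unset parameters.
    print(pick_version_and_kwargs(VersionCappedClient('1.12'),
                                  target_state=None, reimage_boot_volume=False))
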
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index f8819b0dc8..cca97c53f7 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -542,7 +542,7 @@ class LiveMigrationTask(base.TaskBase):
# will be persisted when post_live_migration_at_destination
# runs.
compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
+ update_pci_request_with_placement_allocations(
self.context, self.report_client,
self.instance.pci_requests.requests, provider_mapping)
try:
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 6ff6206f65..754f9e5ba7 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -54,7 +54,7 @@ def replace_allocation_with_migration(context, instance, migration):
# and do any rollback required
raise
- reportclient = report.SchedulerReportClient()
+ reportclient = report.report_client_singleton()
orig_alloc = reportclient.get_allocs_for_consumer(
context, instance.uuid)['allocations']
@@ -94,7 +94,7 @@ def replace_allocation_with_migration(context, instance, migration):
def revert_allocation_for_migration(context, source_cn, instance, migration):
"""Revert an allocation made for a migration back to the instance."""
- reportclient = report.SchedulerReportClient()
+ reportclient = report.report_client_singleton()
# FIXME(gibi): This method is flawed in that it does not handle allocations
# against sharing providers in any special way. This leads to duplicate
@@ -258,6 +258,11 @@ class MigrationTask(base.TaskBase):
# resource requests in a single list and add them to the RequestSpec.
self.request_spec.requested_resources = port_res_req
self.request_spec.request_level_params = req_lvl_params
+ # NOTE(gibi): as PCI devices are tracked in placement we need to
+ # generate request groups from InstancePCIRequests. This will append
+ # new RequestGroup objects to the request_spec.requested_resources list
+ # if needed
+ self.request_spec.generate_request_groups_from_pci_requests()
self._set_requested_destination_cell(legacy_props)
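Several call sites in this change swap a fresh SchedulerReportClient() for report.report_client_singleton(); a minimal sketch of such a module-level memoized accessor (assumed shape only, the real helper lives in nova.scheduler.client.report)::

    _REPORT_CLIENT = None


    class FakeReportClient:
        """Stand-in for the real placement report client."""


    def report_client_singleton():
        """Create the process-wide client on first use, then reuse it."""
        global _REPORT_CLIENT
        if _REPORT_CLIENT is None:
            _REPORT_CLIENT = FakeReportClient()
        return _REPORT_CLIENT


    assert report_client_singleton() is report_client_singleton()
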
diff --git a/nova/conf/api.py b/nova/conf/api.py
index 5c8a367e8e..58cbc4931e 100644
--- a/nova/conf/api.py
+++ b/nova/conf/api.py
@@ -225,8 +225,11 @@ service.
help="""
Domain name used to configure FQDN for instances.
-Configure a fully-qualified domain name for instance hostnames. If unset, only
-the hostname without a domain will be configured.
+Configure a fully-qualified domain name for instance hostnames. The value is
+suffixed to the instance hostname from the database to construct the hostname
+that appears in the metadata API. To disable this behavior (for example in
+order to correctly support the FQDN hostnames introduced in microversion
+2.94), set this to the empty string.
Possible values:
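A rough illustration of the suffixing behaviour described in the help text above (hypothetical helper, not nova's metadata code)::

    def metadata_hostname(instance_hostname, domain):
        """Append the configured domain, or return the hostname unchanged."""
        if not domain:
            # An empty value disables suffixing, e.g. for 2.94-style FQDNs.
            return instance_hostname
        return '%s.%s' % (instance_hostname, domain)


    print(metadata_hostname('vm-1', 'example.com'))   # vm-1.example.com
    print(metadata_hostname('vm-1.example.com', ''))  # vm-1.example.com
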
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 9588c65413..de2743d850 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -305,6 +305,21 @@ Related options:
agent disabled. When used with libvirt the instance mode should be
configured as HVM.
"""),
+ cfg.IntOpt('reimage_timeout_per_gb',
+ default=20,
+ min=1,
+ help="""
+Timeout for reimaging a volume.
+
+Number of seconds to wait for volume-reimaged events to arrive before
+continuing or failing.
+
+This is a per-gigabyte timeout: it defaults to 20 seconds and is multiplied
+by the size of the image in GB. For example, a 6 GB image will have a
+timeout of 20 * 6 = 120 seconds.
+Try increasing the timeout if the image copy per GB takes more time and you
+are hitting timeout failures.
+"""),
]
resource_tracker_opts = [
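The arithmetic behind the reimage_timeout_per_gb option above, as a trivial sketch::

    def reimage_timeout(image_size_gb, per_gb_timeout=20):
        """Seconds to wait for the volume-reimaged event for this image."""
        return per_gb_timeout * image_size_gb


    assert reimage_timeout(6) == 120  # the 6 GB example from the help text
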
@@ -425,9 +440,7 @@ allocation_ratio_opts = [
Virtual CPU to physical CPU allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``VCPU`` inventory. In addition, the
-``AggregateCoreFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``VCPU`` inventory.
.. note::
@@ -458,9 +471,7 @@ Related options:
Virtual RAM to physical RAM allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``MEMORY_MB`` inventory. In addition, the
-``AggregateRamFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``MEMORY_MB`` inventory.
.. note::
@@ -486,9 +497,7 @@ Related options:
Virtual disk to physical disk allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``DISK_GB`` inventory. In addition, the
-``AggregateDiskFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``DISK_GB`` inventory.
When configured, a ratio greater than 1.0 will result in over-subscription of
the available physical disk, which can be useful for more efficiently packing
@@ -520,7 +529,7 @@ Related options:
* ``initial_disk_allocation_ratio``
"""),
cfg.FloatOpt('initial_cpu_allocation_ratio',
- default=16.0,
+ default=4.0,
min=0.0,
help="""
Initial virtual CPU to physical CPU allocation ratio.
@@ -536,7 +545,7 @@ Related options:
* ``cpu_allocation_ratio``
"""),
cfg.FloatOpt('initial_ram_allocation_ratio',
- default=1.5,
+ default=1.0,
min=0.0,
help="""
Initial virtual RAM to physical RAM allocation ratio.
@@ -1007,16 +1016,26 @@ Related options:
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
+ cfg.ListOpt('vmdk_allowed_types',
+ default=['streamOptimized', 'monolithicSparse'],
+ help="""
+A list of strings describing the VMDK "create-type" subformats that will be
+allowed. It is recommended to only include
+single-file-with-sparse-header variants to avoid potential host file
+exposure due to processing named extents. If this list is empty, then no
+form of VMDK image will be allowed.
+"""),
cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
- default=True,
+ default=False,
help="""
This option controls allocation strategy used to choose NUMA cells on host for
placing VM's NUMA cells (for VMs with defined numa topology). By
-default host's NUMA cell with more resources consumed will be chosen first for
-placing attempt. So the host cell with some usage will be packed with VM's cell
-until it will be completely exhausted, before new free host's cell will be
-used. When the packing_host_numa_cells_allocation_strategy variable is set to
-``False``, host's NUMA cell with more resources available will be used.
+default, the host NUMA cell with more resources consumed will be chosen last
+for a placing attempt. When the packing_host_numa_cells_allocation_strategy
+variable is set to ``False``, the host NUMA cell with more resources available
+will be used. When set to ``True``, host cells with some usage will be packed
+with the VM's cells until they are completely exhausted, before a new free
+host cell is used.
Possible values:
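A toy sketch of the two orderings the packing option above chooses between; the dictionaries are placeholders, not nova's NUMA topology objects::

    def order_host_cells(cells, pack=False):
        """Sort candidate host NUMA cells by free memory.

        pack=True tries the most-used (least free) cells first, so partially
        used cells are exhausted before a fresh cell is touched; pack=False
        (the new default) spreads load by trying the emptiest cells first.
        """
        return sorted(cells, key=lambda c: c['free_mb'], reverse=not pack)


    cells = [{'id': 0, 'free_mb': 512}, {'id': 1, 'free_mb': 4096}]
    print([c['id'] for c in order_host_cells(cells, pack=True)])   # [0, 1]
    print([c['id'] for c in order_host_cells(cells, pack=False)])  # [1, 0]
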
diff --git a/nova/conf/ironic.py b/nova/conf/ironic.py
index dc5d2412c4..2734f2b78a 100644
--- a/nova/conf/ironic.py
+++ b/nova/conf/ironic.py
@@ -27,6 +27,7 @@ ironic_group = cfg.OptGroup(
help="""
Configuration options for Ironic driver (Bare Metal).
If using the Ironic driver following options must be set:
+
* auth_type
* auth_url
* project_name
diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 4ea37b8fe9..204fe5c4b8 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -987,6 +987,7 @@ slowly to be useful. Actual errors will be reported by Glance and noticed
according to the poll interval.
Related options:
+
* images_type - must be set to ``rbd``
* images_rbd_glance_store_name - must be set to a store name
* images_rbd_glance_copy_poll_interval - controls the failure time-to-notice
@@ -1477,6 +1478,23 @@ Related options:
"""),
]
+libvirt_cpu_mgmt_opts = [
+ cfg.BoolOpt('cpu_power_management',
+ default=False,
+ help='Use libvirt to manage CPU cores performance.'),
+ cfg.StrOpt('cpu_power_management_strategy',
+ choices=['cpu_state', 'governor'],
+ default='cpu_state',
+ help='Tuning strategy to reduce CPU power consumption when '
+ 'unused'),
+ cfg.StrOpt('cpu_power_governor_low',
+ default='powersave',
+ help='Governor to use in order '
+ 'to reduce CPU power consumption'),
+ cfg.StrOpt('cpu_power_governor_high',
+ default='performance',
+ help='Governor to use in order to have best CPU performance'),
+]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
@@ -1498,6 +1516,7 @@ ALL_OPTS = list(itertools.chain(
libvirt_volume_nvmeof_opts,
libvirt_pmem_opts,
libvirt_vtpm_opts,
+ libvirt_cpu_mgmt_opts,
))
diff --git a/nova/conf/mks.py b/nova/conf/mks.py
index 1703f5f240..ec403a1a4f 100644
--- a/nova/conf/mks.py
+++ b/nova/conf/mks.py
@@ -23,7 +23,9 @@ Nova compute node uses WebMKS, a desktop sharing protocol to provide
instance console access to VM's created by VMware hypervisors.
Related options:
+
Following options must be set to provide console access.
+
* mksproxy_base_url
* enabled
""")
diff --git a/nova/conf/pci.py b/nova/conf/pci.py
index de9a2e297b..533bf52ead 100644
--- a/nova/conf/pci.py
+++ b/nova/conf/pci.py
@@ -67,6 +67,36 @@ Possible Values:
Required NUMA affinity of device. Valid values are: ``legacy``,
``preferred`` and ``required``.
+ ``resource_class``
+ The optional Placement resource class name that is used
+ to track the requested PCI devices in Placement. It can be a standard
+ resource class from the ``os-resource-classes`` lib. Or it can be an
+ arbitrary string. If it is a non-standard resource class then Nova will
+ normalize it to a proper Placement resource class by
+ making it upper case, replacing any consecutive character outside of
+ ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if
+ not yet prefixed. The maximum allowed length is 255 character including the
+ prefix. If ``resource_class`` is not provided Nova will generate it from
+ ``vendor_id`` and ``product_id`` values of the alias in the form of
+ ``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class`` requested
+ in the alias is matched against the ``resource_class`` defined in the
+ ``[pci]device_spec``. This field can only be used if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
+
+ ``traits``
+ An optional comma separated list of Placement trait names requested to be
+ present on the resource provider that fulfills this alias. Each trait can
+ be a standard trait from ``os-traits`` lib or it can be an arbitrary
+ string. If it is a non-standard trait then Nova will normalize the
+ trait name by making it upper case, replacing any consecutive character
+ outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name
+ with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a
+ trait name is 255 characters including the prefix. Every trait in
+ ``traits`` requested in the alias is ensured to be in the list of traits
+ provided in the ``traits`` field of the ``[pci]device_spec`` when
+ scheduling the request. This field can only be used only if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
+
* Supports multiple aliases by repeating the option (not by specifying
a list value)::
@@ -85,16 +115,18 @@ Possible Values:
"numa_policy": "required"
}
"""),
- cfg.MultiStrOpt('passthrough_whitelist',
+ cfg.MultiStrOpt('device_spec',
default=[],
- deprecated_name='pci_passthrough_whitelist',
- deprecated_group='DEFAULT',
+ deprecated_opts=[
+ cfg.DeprecatedOpt('passthrough_whitelist', group='pci'),
+ cfg.DeprecatedOpt('pci_passthrough_whitelist', group='DEFAULT'),
+ ],
help="""
-White list of PCI devices available to VMs.
+Specify the PCI devices available to VMs.
Possible values:
-* A JSON dictionary which describe a whitelisted PCI device. It should take
+* A JSON dictionary which describes a PCI device. It should take
the following format::
["vendor_id": "<id>",] ["product_id": "<id>",]
@@ -129,7 +161,7 @@ Possible values:
have a name.
``<tag>``
- Additional ``<tag>`` and ``<tag_value>`` used for matching PCI devices.
+ Additional ``<tag>`` and ``<tag_value>`` used for specifying PCI devices.
Supported ``<tag>`` values are :
- ``physical_network``
@@ -142,62 +174,102 @@ Possible values:
VPD capability with a card serial number (either on a VF itself on
its corresponding PF), otherwise they will be ignored and not
available for allocation.
+ - ``resource_class`` - optional Placement resource class name to be used
+ to track the matching PCI devices in Placement when
+ ``[pci]report_in_placement`` is enabled. It can be a standard resource
+ class from the ``os-resource-classes`` lib, or it can be any string. In
+ that case Nova will
+ normalize it to a proper Placement resource class by making it upper
+ case, replacing any consecutive character outside of ``[A-Z0-9_]`` with a
+ single '_', and prefixing the name with ``CUSTOM_`` if not yet prefixed.
+ The maximum allowed length is 255 characters including the prefix.
+ If ``resource_class`` is not provided Nova will generate it from the PCI
+ device's ``vendor_id`` and ``product_id`` in the form of
+ ``CUSTOM_PCI_{vendor_id}_{product_id}``.
+ The ``resource_class`` can be requested from a ``[pci]alias``
+ - ``traits`` - optional comma separated list of Placement trait names to
+ report on the resource provider that will represent the matching PCI
+ device. Each trait can be a standard trait from ``os-traits`` lib or can
+ be any string. If it is not a standard trait then Nova will normalize the
+ trait name by making it upper case, replacing any consecutive character
+ outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name with
+ ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a trait
+ name is 255 characters including the prefix.
+ Any trait from ``traits`` can be requested from a ``[pci]alias``.
Valid examples are::
- passthrough_whitelist = {"devname":"eth0",
- "physical_network":"physnet"}
- passthrough_whitelist = {"address":"*:0a:00.*"}
- passthrough_whitelist = {"address":":0a:00.",
- "physical_network":"physnet1"}
- passthrough_whitelist = {"vendor_id":"1137",
- "product_id":"0071"}
- passthrough_whitelist = {"vendor_id":"1137",
- "product_id":"0071",
- "address": "0000:0a:00.1",
- "physical_network":"physnet1"}
- passthrough_whitelist = {"address":{"domain": ".*",
- "bus": "02", "slot": "01",
- "function": "[2-7]"},
- "physical_network":"physnet1"}
- passthrough_whitelist = {"address":{"domain": ".*",
- "bus": "02", "slot": "0[1-2]",
- "function": ".*"},
- "physical_network":"physnet1"}
- passthrough_whitelist = {"devname": "eth0", "physical_network":"physnet1",
- "trusted": "true"}
- passthrough_whitelist = {"vendor_id":"a2d6",
- "product_id":"15b3",
- "remote_managed": "true"}
- passthrough_whitelist = {"vendor_id":"a2d6",
- "product_id":"15b3",
- "address": "0000:82:00.0",
- "physical_network":"physnet1",
- "remote_managed": "true"}
+ device_spec = {"devname":"eth0",
+ "physical_network":"physnet"}
+ device_spec = {"address":"*:0a:00.*"}
+ device_spec = {"address":":0a:00.",
+ "physical_network":"physnet1"}
+ device_spec = {"vendor_id":"1137",
+ "product_id":"0071"}
+ device_spec = {"vendor_id":"1137",
+ "product_id":"0071",
+ "address": "0000:0a:00.1",
+ "physical_network":"physnet1"}
+ device_spec = {"address":{"domain": ".*",
+ "bus": "02", "slot": "01",
+ "function": "[2-7]"},
+ "physical_network":"physnet1"}
+ device_spec = {"address":{"domain": ".*",
+ "bus": "02", "slot": "0[1-2]",
+ "function": ".*"},
+ "physical_network":"physnet1"}
+ device_spec = {"devname": "eth0", "physical_network":"physnet1",
+ "trusted": "true"}
+ device_spec = {"vendor_id":"a2d6",
+ "product_id":"15b3",
+ "remote_managed": "true"}
+ device_spec = {"vendor_id":"a2d6",
+ "product_id":"15b3",
+ "address": "0000:82:00.0",
+ "physical_network":"physnet1",
+ "remote_managed": "true"}
+ device_spec = {"vendor_id":"1002",
+ "product_id":"6929",
+ "address": "0000:82:00.0",
+ "resource_class": "PGPU",
+ "traits": "HW_GPU_API_VULKAN,my-awesome-gpu"}
The following are invalid, as they specify mutually exclusive options::
- passthrough_whitelist = {"devname":"eth0",
- "physical_network":"physnet",
- "address":"*:0a:00.*"}
+ device_spec = {"devname":"eth0",
+ "physical_network":"physnet",
+ "address":"*:0a:00.*"}
The following example is invalid because it specifies the ``remote_managed``
tag for a PF - it will result in an error during config validation at the
Nova Compute service startup::
- passthrough_whitelist = {"address": "0000:82:00.0",
- "product_id": "a2d6",
- "vendor_id": "15b3",
- "physical_network": null,
- "remote_managed": "true"}
+ device_spec = {"address": "0000:82:00.0",
+ "product_id": "a2d6",
+ "vendor_id": "15b3",
+ "physical_network": null,
+ "remote_managed": "true"}
* A JSON list of JSON dictionaries corresponding to the above format. For
example::
- passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
- {"product_id":"0002", "vendor_id":"8086"}]
-""")
+ device_spec = [{"product_id":"0001", "vendor_id":"8086"},
+ {"product_id":"0002", "vendor_id":"8086"}]
+"""),
+ cfg.BoolOpt('report_in_placement',
+ default=False,
+ help="""
+Enable PCI resource inventory reporting to Placement. If it is enabled then the
+nova-compute service will report PCI resource inventories to Placement
+according to the [pci]device_spec configuration and the PCI devices reported
+by the hypervisor. Once it is enabled it cannot be disabled any more. In a
+future release the default of this config will be changed to True.
+
+Related options:
+
+* [pci]device_spec: to define which PCI devices nova is allowed to track and
+ assign to guests.
+"""),
]
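The normalization rule repeated in the alias and device_spec help above (upper-case, collapse runs of characters outside [A-Z0-9_] to a single underscore, add a CUSTOM_ prefix, cap at 255 characters) can be sketched as follows; this is an illustration of the documented rule, not the helper nova actually uses::

    import re


    def normalize_custom_name(name):
        """Turn an arbitrary string into a placement-style custom name."""
        norm = re.sub(r'[^A-Z0-9_]+', '_', name.upper())
        if not norm.startswith('CUSTOM_'):
            norm = 'CUSTOM_' + norm
        if len(norm) > 255:
            raise ValueError('normalized name longer than 255 characters')
        return norm


    print(normalize_custom_name('my-awesome-gpu'))  # CUSTOM_MY_AWESOME_GPU
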
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index 03e78fe701..c75bd07c5b 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -745,7 +745,26 @@ Possible values:
Related options:
* ``[filter_scheduler] aggregate_image_properties_isolation_namespace``
-""")]
+"""),
+ cfg.BoolOpt(
+ "pci_in_placement",
+ default=False,
+ help="""
+Enable scheduling and claiming PCI devices in Placement.
+
+This can be enabled after ``[pci]report_in_placement`` is enabled on all
+compute hosts.
+
+When enabled the scheduler queries Placement about the PCI device
+availability to select a destination for a server with a PCI request. The
+scheduler also allocates the selected PCI devices in Placement. Note that this
+logic does not replace the PCIPassthroughFilter but extends it.
+
+Related options:
+
+* ``[pci] report_in_placement``
+* ``[pci] alias``
+* ``[pci] device_spec``
+"""),
+]
metrics_group = cfg.OptGroup(
name="metrics",
diff --git a/nova/conf/spice.py b/nova/conf/spice.py
index 59ed4e80a0..e5854946f1 100644
--- a/nova/conf/spice.py
+++ b/nova/conf/spice.py
@@ -85,6 +85,59 @@ Agent. With the Spice agent installed the following features are enabled:
needing to click inside the console or press keys to release it. The
performance of mouse movement is also improved.
"""),
+ cfg.StrOpt('image_compression',
+ advanced=True,
+ choices=[
+ ('auto_glz', 'enable image compression mode to choose between glz '
+ 'and quic algorithm, based on image properties'),
+ ('auto_lz', 'enable image compression mode to choose between lz '
+ 'and quic algorithm, based on image properties'),
+ ('quic', 'enable image compression based on the SFALIC algorithm'),
+ ('glz', 'enable image compression using lz with history based '
+ 'global dictionary'),
+ ('lz', 'enable image compression with the Lempel-Ziv algorithm'),
+ ('off', 'disable image compression')
+ ],
+ help="""
+Configure the SPICE image compression (lossless).
+"""),
+ cfg.StrOpt('jpeg_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable JPEG image compression automatically'),
+ ('never', 'disable JPEG image compression'),
+ ('always', 'enable JPEG image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossy for slow links).
+"""),
+ cfg.StrOpt('zlib_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable zlib image compression automatically'),
+ ('never', 'disable zlib image compression'),
+ ('always', 'enable zlib image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossless for slow links).
+"""),
+ cfg.BoolOpt('playback_compression',
+ advanced=True,
+ help="""
+Enable the SPICE audio stream compression (using celt).
+"""),
+ cfg.StrOpt('streaming_mode',
+ advanced=True,
+ choices=[
+ ('filter', 'SPICE server adds additional filters to decide if '
+ 'video streaming should be activated'),
+ ('all', 'any fast-refreshing window can be encoded into a video '
+ 'stream'),
+ ('off', 'no video detection and (lossy) compression is performed')
+ ],
+ help="""
+Configure the SPICE video stream detection and (lossy) compression.
+"""),
cfg.URIOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help="""
diff --git a/nova/conf/vmware.py b/nova/conf/vmware.py
index 63a5f04ea4..17a2676b64 100644
--- a/nova/conf/vmware.py
+++ b/nova/conf/vmware.py
@@ -76,7 +76,9 @@ Possible values:
* Any valid URI (The scheme is 'telnet' or 'telnets'.)
Related options:
+
This option is ignored if serial_port_service_uri is not specified.
+
* serial_port_service_uri
"""),
cfg.StrOpt('serial_log_dir',
@@ -112,6 +114,7 @@ If true, the vCenter server certificate is not verified. If false,
then the default CA truststore is used for verification.
Related options:
+
* ca_file: This option is ignored if "ca_file" is set.
"""),
cfg.StrOpt('cluster_name',
@@ -158,7 +161,9 @@ Possible values:
* Any valid port number within 5900 -(5900 + vnc_port_total)
Related options:
+
Below options should be set to enable VNC client.
+
* vnc.enabled = True
* vnc_port_total
"""),
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index 2ec53282cd..943ec74885 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -374,6 +374,28 @@ Related options:
* :oslo.config:option:`DEFAULT.compute_driver` (libvirt)
"""),
+ cfg.IntOpt('qemu_monitor_announce_self_count',
+ default=3,
+ min=1,
+ help="""
+The total number of times to send the announce_self command to the QEMU
+monitor when enable_qemu_monitor_announce_self is enabled.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
+ cfg.IntOpt('qemu_monitor_announce_self_interval',
+ default=1,
+ min=1,
+ help="""
+The number of seconds to wait before re-sending the announce_self
+command to the QEMU monitor.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
cfg.BoolOpt('disable_compute_service_check_for_ffu',
default=False,
help="""
@@ -410,6 +432,13 @@ with the destination host. When using QEMU >= 2.9 and libvirt >=
4.4.0, libvirt will do the correct thing with respect to checking CPU
compatibility on the destination host during live migration.
"""),
+ cfg.BoolOpt('skip_cpu_compare_at_startup',
+ default=False,
+ help="""
+This will skip the CPU comparison call at the startup of the Compute
+service and let libvirt handle it.
+"""),
+
cfg.BoolOpt(
'skip_hypervisor_version_check_on_lm',
default=False,
@@ -417,6 +446,21 @@ compatibility on the destination host during live migration.
When this is enabled, it will skip version-checking of hypervisors
during live migration.
"""),
+ cfg.BoolOpt(
+ 'skip_reserve_in_use_ironic_nodes',
+ default=False,
+ help="""
+This may be useful if you use the Ironic driver, but don't have
+automatic cleaning enabled in Ironic. Nova, by default, will mark
+Ironic nodes as reserved as soon as they are in use. When you free
+the Ironic node (by deleting the nova instance) it takes a while
+for Nova to un-reserve that Ironic node in placement. Usually this
+is a good idea, because it avoids placement providing an Ironic node
+as a valid candidate when it is still being cleaned.
+However, if you don't use automatic cleaning, it can cause an
+extra delay before an Ironic node is available for building a
+new Nova instance.
+"""),
]
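A rough sketch of the retry loop implied by the qemu_monitor_announce_self_count and qemu_monitor_announce_self_interval options above; send_announce is a placeholder for whatever issues the QEMU monitor command::

    import time


    def announce_self_with_retries(send_announce, count=3, interval=1):
        """Send announce_self `count` times, sleeping `interval` s between."""
        for attempt in range(1, count + 1):
            send_announce()
            if attempt < count:
                time.sleep(interval)


    announce_self_with_retries(lambda: print('announce_self sent'))
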
diff --git a/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
new file mode 100644
index 0000000000..f4666a2b00
--- /dev/null
+++ b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""de-duplicate_indexes_in_instances__console_auth_tokens
+
+Revision ID: 960aac0e09ea
+Revises: ccb0fa1a2252
+Create Date: 2022-09-15 17:00:23.175991
+"""
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = '960aac0e09ea'
+down_revision = 'ccb0fa1a2252'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ with op.batch_alter_table('console_auth_tokens', schema=None) as batch_op:
+ batch_op.drop_index('console_auth_tokens_token_hash_idx')
+
+ with op.batch_alter_table('instances', schema=None) as batch_op:
+ batch_op.drop_index('uuid')
diff --git a/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py b/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py
new file mode 100644
index 0000000000..1fd3fb4780
--- /dev/null
+++ b/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add encryption fields to BlockDeviceMapping
+
+Revision ID: ccb0fa1a2252
+Revises: 16f1fbcab42b
+Create Date: 2022-01-12 15:22:47.524285
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = 'ccb0fa1a2252'
+down_revision = '16f1fbcab42b'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ for prefix in ('', 'shadow_'):
+ table_name = prefix + 'block_device_mapping'
+ with op.batch_alter_table(table_name, schema=None) as batch_op:
+ batch_op.add_column(
+ sa.Column(
+ 'encrypted',
+ sa.Boolean(),
+ nullable=True,
+ )
+ )
+ batch_op.add_column(
+ sa.Column(
+ 'encryption_secret_uuid',
+ sa.String(length=36),
+ nullable=True,
+ )
+ )
+ batch_op.add_column(
+ sa.Column('encryption_format',
+ sa.String(length=128),
+ nullable=True,
+ )
+ )
+ batch_op.add_column(
+ sa.Column('encryption_options',
+ sa.String(length=4096),
+ nullable=True,
+ )
+ )
diff --git a/nova/db/main/models.py b/nova/db/main/models.py
index f2f58b2db1..f8363a89c0 100644
--- a/nova/db/main/models.py
+++ b/nova/db/main/models.py
@@ -266,7 +266,6 @@ class Instance(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
- sa.Index('uuid', 'uuid', unique=True),
sa.Index('instances_project_id_idx', 'project_id'),
sa.Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
@@ -654,9 +653,15 @@ class BlockDeviceMapping(BASE, NovaBase, models.SoftDeleteMixin):
attachment_id = sa.Column(sa.String(36))
+ encrypted = sa.Column(sa.Boolean, default=False)
+ encryption_secret_uuid = sa.Column(sa.String(36))
+ encryption_format = sa.Column(sa.String(128))
+ encryption_options = sa.Column(sa.String(4096))
# TODO(stephenfin): Remove once we drop the security_groups field from the
# Instance table. Until then, this is tied to the SecurityGroup table
+
+
class SecurityGroupInstanceAssociation(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
@@ -679,7 +684,7 @@ class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin):
name='uniq_security_groups0project_id0'
'name0deleted'),
)
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
@@ -687,8 +692,8 @@ class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin):
project_id = sa.Column(sa.String(255))
instances = orm.relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
@@ -1040,7 +1045,6 @@ class ConsoleAuthToken(BASE, NovaBase):
__table_args__ = (
sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
- sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
sa.Index(
'console_auth_tokens_token_hash_instance_uuid_idx', 'token_hash',
'instance_uuid',
diff --git a/nova/exception.py b/nova/exception.py
index 917bff078c..0c0ffa85a1 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -207,6 +207,21 @@ class Invalid(NovaException):
code = 400
+class InvalidVIOMMUMachineType(Invalid):
+ msg_fmt = _("vIOMMU is not supported by Current machine type %(mtype)s "
+ "(Architecture: %(arch)s).")
+
+
+class InvalidVIOMMUArchitecture(Invalid):
+ msg_fmt = _("vIOMMU required either x86 or AArch64 architecture, "
+ "but given architecture %(arch)s.")
+
+
+class InstanceQuiesceFailed(Invalid):
+ msg_fmt = _("Failed to quiesce instance: %(reason)s")
+ code = 409
+
+
class InvalidConfiguration(Invalid):
msg_fmt = _("Configuration is Invalid.")
@@ -1436,6 +1451,11 @@ class InstanceEvacuateNotSupported(Invalid):
msg_fmt = _('Instance evacuate is not supported.')
+class InstanceEvacuateNotSupportedTargetState(Invalid):
+ msg_fmt = _("Target state '%(target_state)s' for instance evacuate "
+ "is not supported.")
+
+
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
@@ -1464,6 +1484,11 @@ class UnsupportedRescueImage(Invalid):
msg_fmt = _("Requested rescue image '%(image)s' is not supported")
+class UnsupportedRPCVersion(Invalid):
+ msg_fmt = _("Unsupported RPC version for %(api)s. "
+ "Required >= %(required)s")
+
+
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
@@ -1570,8 +1595,8 @@ class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
-class PciConfigInvalidWhitelist(Invalid):
- msg_fmt = _("Invalid PCI devices Whitelist config: %(reason)s")
+class PciConfigInvalidSpec(Invalid):
+ msg_fmt = _("Invalid [pci]device_spec config: %(reason)s")
class PciRequestFromVIFNotFound(NotFound):
@@ -1861,6 +1886,17 @@ class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
+class LockMemoryForbidden(Forbidden):
+ msg_fmt = _("locked_memory value in image or flavor is forbidden when "
+ "mem_page_size is not set.")
+
+
+class FlavorImageLockedMemoryConflict(NovaException):
+ msg_fmt = _("locked_memory value in image (%(image)s) and flavor "
+ "(%(flavor)s) conflict. A consistent value is expected if "
+ "both specified.")
+
+
class CPUPinningInvalid(Invalid):
msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
"free CPU set %(available)s")
@@ -2077,6 +2113,16 @@ class ResourceProviderUpdateConflict(PlacementAPIConflict):
"provider %(uuid)s (generation %(generation)d): %(error)s")
+class PlacementReshapeConflict(PlacementAPIConflict):
+ """A 409 caused by generation mismatch from attempting to reshape a
+ provider tree.
+ """
+ msg_fmt = _(
+ "A conflict was encountered attempting to reshape a provider tree: "
+ "$(error)s"
+ )
+
+
class InvalidResourceClass(Invalid):
msg_fmt = _("Resource class '%(resource_class)s' invalid.")
@@ -2427,3 +2473,51 @@ class ProviderConfigException(NovaException):
"""
msg_fmt = _("An error occurred while processing "
"a provider config file: %(error)s")
+
+
+class PlacementPciException(NovaException):
+ msg_fmt = _(
+ "Failed to gather or report PCI resources to Placement: %(error)s")
+
+
+class PlacementPciDependentDeviceException(PlacementPciException):
+ msg_fmt = _(
+ "Configuring both %(parent_dev)s and %(children_devs)s in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured."
+ )
+
+
+class PlacementPciMixedResourceClassException(PlacementPciException):
+ msg_fmt = _(
+ "VFs from the same PF cannot be configured with different "
+ "'resource_class' values in [pci]device_spec. We got %(new_rc)s "
+ "for %(new_dev)s and %(current_rc)s for %(current_devs)s."
+ )
+
+
+class PlacementPciMixedTraitsException(PlacementPciException):
+ msg_fmt = _(
+ "VFs from the same PF cannot be configured with different set "
+ "of 'traits' in [pci]device_spec. We got %(new_traits)s for "
+ "%(new_dev)s and %(current_traits)s for %(current_devs)s."
+ )
+
+
+class ReimageException(NovaException):
+ msg_fmt = _("Reimaging volume failed.")
+
+
+class InvalidNodeConfiguration(NovaException):
+ msg_fmt = _('Invalid node identity configuration: %(reason)s')
+
+
+class DuplicateRecord(NovaException):
+ msg_fmt = _('Unable to create duplicate record for %(target)s')
+
+
+class NotSupportedComputeForEvacuateV295(NotSupported):
+ msg_fmt = _("Starting with microversion 2.95, evacuate API will stop "
+ "instance on destination. To evacuate before upgrades are "
+ "complete please use an older microversion. Required version "
+ "for compute %(expected), current version %(currently)s")
diff --git a/nova/filesystem.py b/nova/filesystem.py
new file mode 100644
index 0000000000..5394d2d835
--- /dev/null
+++ b/nova/filesystem.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to address filesystem calls, particularly sysfs."""
+
+import os
+
+from oslo_log import log as logging
+
+from nova import exception
+
+LOG = logging.getLogger(__name__)
+
+
+SYS = '/sys'
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+def read_sys(path: str) -> str:
+ """Reads the content of a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :returns: contents of that file.
+ :raises: nova.exception.FileNotFound if we can't read that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='r') as data:
+ return data.read()
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+# In order to correctly use it, you need to decorate the caller with a specific
+# privsep entrypoint.
+def write_sys(path: str, data: str) -> None:
+ """Writes the content of a file in the sys filesystem with data.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :param data: the data to write.
+ :returns: None.
+ :raises: nova.exception.FileNotFound if we can't write that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='w') as fd:
+ fd.write(data)
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
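A possible use of the helpers above, tying them to the [libvirt]cpu_power_management 'governor' strategy added earlier; the sysfs path and value are examples only, and in real code the write would need an appropriate privsep context::

    # Hypothetical usage sketch of nova.filesystem; not nova driver code.
    from nova import filesystem

    GOVERNOR_PATH = 'devices/system/cpu/cpu2/cpufreq/scaling_governor'

    current = filesystem.read_sys(GOVERNOR_PATH).strip()
    if current != 'powersave':
        # write_sys must be called from a properly decorated privsep
        # entrypoint; calling it directly here is purely illustrative.
        filesystem.write_sys(GOVERNOR_PATH, 'powersave')
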
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index cd393e7b33..704538250f 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -141,6 +141,8 @@ rwlock_re = re.compile(
r"(?P<module_part>(oslo_concurrency\.)?(lockutils|fasteners))"
r"\.ReaderWriterLock\(.*\)")
six_re = re.compile(r"^(import six(\..*)?|from six(\..*)? import .*)$")
+# Regex for catching the setDaemon method
+set_daemon_re = re.compile(r"\.setDaemon\(")
class BaseASTChecker(ast.NodeVisitor):
@@ -1078,3 +1080,22 @@ def import_stock_mock(logical_line):
"N371: You must explicitly import python's mock: "
"``from unittest import mock``"
)
+
+
+@core.flake8ext
+def check_set_daemon(logical_line):
+ """Check for use of the setDaemon method of the threading.Thread class
+
+ The setDaemon method of the threading.Thread class has been deprecated
+ since Python 3.10. Use the daemon attribute instead.
+
+ See
+ https://docs.python.org/3.10/library/threading.html#threading.Thread.setDaemon
+ for details.
+
+ N372
+ """
+ res = set_daemon_re.search(logical_line)
+ if res:
+ yield (0, "N372: Don't use the setDaemon method. "
+ "Use the daemon attribute instead.")
diff --git a/nova/limit/placement.py b/nova/limit/placement.py
index 497986c4ab..eedf7d69e1 100644
--- a/nova/limit/placement.py
+++ b/nova/limit/placement.py
@@ -43,10 +43,8 @@ LEGACY_LIMITS = {
def _get_placement_usages(
context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
- global PLACEMENT_CLIENT
- if not PLACEMENT_CLIENT:
- PLACEMENT_CLIENT = report.SchedulerReportClient()
- return PLACEMENT_CLIENT.get_usages_counts_for_limits(context, project_id)
+ return report.report_client_singleton().get_usages_counts_for_limits(
+ context, project_id)
def _get_usage(
diff --git a/nova/locale/cs/LC_MESSAGES/nova.po b/nova/locale/cs/LC_MESSAGES/nova.po
index f2134f17ea..16baa8e1bb 100644
--- a/nova/locale/cs/LC_MESSAGES/nova.po
+++ b/nova/locale/cs/LC_MESSAGES/nova.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1329,12 +1329,6 @@ msgstr "Pár klíčů %(name)s nenalezena pro uživatele %(user_id)s"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Data páru klíčů jsou neplatná: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Název páru klíčů obsahuje nebezpečné znaky"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "Název páru klíče musí být řetězec dlouhý 1 až 255 znaků"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limity jsou podporovány pouze ve vCenter verze 6.0 a vyšší"
diff --git a/nova/locale/de/LC_MESSAGES/nova.po b/nova/locale/de/LC_MESSAGES/nova.po
index 8efe607588..32e7c52060 100644
--- a/nova/locale/de/LC_MESSAGES/nova.po
+++ b/nova/locale/de/LC_MESSAGES/nova.po
@@ -17,7 +17,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1503,14 +1503,6 @@ msgstr "Schlüsselpaar %(name)s für Benutzer %(user_id)s nicht gefunden"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Schlüsselpaardaten ungültig: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Name von Schlüsselpaar enthält unsichere Zeichen"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Name des Schlüsselpares muss eine Zeichenkette und zwischen 1 und 255 "
-"Zeichen lang sein."
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Grenzwerte werden nur von vCenter ab Version 6.0 unterstützt."
diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po
index fbf387f893..4edd7b0ae3 100644
--- a/nova/locale/es/LC_MESSAGES/nova.po
+++ b/nova/locale/es/LC_MESSAGES/nova.po
@@ -16,7 +16,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1475,13 +1475,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "El conjunto de claves son inválidos: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "El nombre de par de claves contiene caracteres no seguros"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"El nombre de par de claves debe ser serial y contener de 1 a 255 caracteres"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Sólo se admiten límites a partir de vCenter 6.0 "
diff --git a/nova/locale/fr/LC_MESSAGES/nova.po b/nova/locale/fr/LC_MESSAGES/nova.po
index c6741d52b8..07946f1ddc 100644
--- a/nova/locale/fr/LC_MESSAGES/nova.po
+++ b/nova/locale/fr/LC_MESSAGES/nova.po
@@ -28,7 +28,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1486,14 +1486,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "La donnée de paire de clés est invalide : %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Le nom de la paire de clés contient des caractères non sécurisés"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"La paire de clé doit être une chaîne et de longueur comprise entre 1 et 255 "
-"caractères"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limites seulement supportées sur vCenter 6.0 et supérieur"
diff --git a/nova/locale/it/LC_MESSAGES/nova.po b/nova/locale/it/LC_MESSAGES/nova.po
index d1793d1dcd..e1e7b777a7 100644
--- a/nova/locale/it/LC_MESSAGES/nova.po
+++ b/nova/locale/it/LC_MESSAGES/nova.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1468,14 +1468,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "I dati della coppia di chiavi non sono validi: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Il nome coppia di chiavi contiene caratteri non sicuri"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Il nome coppia di chiavi deve essere una stringa compresa tra 1 e 255 "
-"caratteri"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limiti supportati solo da vCenter 6.0 e successivi"
diff --git a/nova/locale/ja/LC_MESSAGES/nova.po b/nova/locale/ja/LC_MESSAGES/nova.po
index 845d555d38..1a3a0dfc82 100644
--- a/nova/locale/ja/LC_MESSAGES/nova.po
+++ b/nova/locale/ja/LC_MESSAGES/nova.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1457,12 +1457,6 @@ msgstr "ユーザー %(user_id)s のキーペア %(name)s が見つかりませ
msgid "Keypair data is invalid: %(reason)s"
msgstr "キーペアデータが無効です: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "キーペア名に安全ではない文字が含まれています"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "キーペア名は 1 から 255 文字の長さの文字列でなければなりません"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "上限が適用されるのは、vCenter 6.0 以降の場合のみです。"
diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova.po b/nova/locale/ko_KR/LC_MESSAGES/nova.po
index becf06963a..11197b6aee 100644
--- a/nova/locale/ko_KR/LC_MESSAGES/nova.po
+++ b/nova/locale/ko_KR/LC_MESSAGES/nova.po
@@ -15,7 +15,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1447,12 +1447,6 @@ msgstr "%(user_id)s 사용자에 대한 키 쌍 %(name)s을(를) 찾을 수 없
msgid "Keypair data is invalid: %(reason)s"
msgstr "키 쌍 데이터가 올바르지 않습니다: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "키 쌍 이름에 안전하지 않은 문자가 들어있음"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "키 쌍 이름은 문자열이고 길이가 1 - 255자 범위에 속해야 함"
-
msgid "Libguestfs does not have permission to read host kernel."
msgstr "Libguestfs에게는 커널 호스트를 읽어올 수 있는 권한이 없습니다"
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova.po b/nova/locale/pt_BR/LC_MESSAGES/nova.po
index ffcffa23c8..a760586ef9 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova.po
@@ -19,7 +19,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1457,14 +1457,6 @@ msgstr "Par de chaves %(name)s não localizado para o usuário %(user_id)s"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Dados do par de chaves é inválido: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "O nome do par de chaves contém caracteres não seguros"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"O nome do par de chaves deve ser uma sequência e entre 1 e 255 caracteres de "
-"comprimento"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limites suportados somente a partir do vCenter 6.0 e acima"
diff --git a/nova/locale/ru/LC_MESSAGES/nova.po b/nova/locale/ru/LC_MESSAGES/nova.po
index 98e345e680..1ea59ab496 100644
--- a/nova/locale/ru/LC_MESSAGES/nova.po
+++ b/nova/locale/ru/LC_MESSAGES/nova.po
@@ -16,7 +16,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1461,13 +1461,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "Недопустимая пара ключей: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Имя криптографической пары содержит ненадежные символы"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Имя криптографической пары должно быть строкой длиной от 1 до 255 символов"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Ограничения поддерживаются только в vCenter 6.0 и выше"
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova.po b/nova/locale/tr_TR/LC_MESSAGES/nova.po
index 813bf9a3f9..2e4783233d 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova.po
@@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1166,14 +1166,6 @@ msgstr "%(user_id)s kullanıcısı için %(name)s anahtar çifti bulunamadı"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Anahtar çifti verisi geçersiz: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Anahtar çifti ismi güvensiz karakterler içeriyor"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Anahtar çifti adı karakter dizisi ve 1 ve 255 karakter uzunluğu arasında "
-"olmalıdır"
-
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr "Hatalı biçimlendirilmiş mesaj gövdesi: %(reason)s"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova.po b/nova/locale/zh_CN/LC_MESSAGES/nova.po
index d7a90345c6..f206bec321 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova.po
@@ -37,11 +37,11 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-06-21 17:28+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-06-14 09:55+0000\n"
+"PO-Revision-Date: 2022-07-26 02:32+0000\n"
"Last-Translator: Research and Development Center UnitedStack "
"<dev@unitedstack.com>\n"
"Language: zh_CN\n"
@@ -179,7 +179,7 @@ msgstr "聚合 %(aggregate_name)s 已经存在。"
#, python-format
msgid "Aggregate %s does not support empty named availability zone"
-msgstr "聚集 %s 不支持名称为空的可用区域"
+msgstr "聚合 %s 不支持名称为空的可用区域"
#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
@@ -948,7 +948,7 @@ msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "Hyper-V驱动不支持主机开机"
msgid "Host aggregate is not empty"
-msgstr "主机聚合不能为空"
+msgstr "主机聚合不为空"
msgid "Host does not support guests with NUMA topology set"
msgstr "主机不支持具有 NUMA 拓扑集的客户机"
@@ -1405,12 +1405,6 @@ msgstr "密钥对 %(name)s 没有为用户 %(user_id)s 找到。"
msgid "Keypair data is invalid: %(reason)s"
msgstr "密钥对数据不合法: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "密钥对名称包含不安全的字符"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "密钥对必须是字符串,并且长度在1到255个字符"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "仅 vCenter 6.0 及以上版本支持限制"
diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova.po b/nova/locale/zh_TW/LC_MESSAGES/nova.po
index d73eae017c..83248a062e 100644
--- a/nova/locale/zh_TW/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_TW/LC_MESSAGES/nova.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2022-03-30 00:54+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -1331,12 +1331,6 @@ msgstr "找不到使用者 %(user_id)s 的金鑰組 %(name)s"
msgid "Keypair data is invalid: %(reason)s"
msgstr "金鑰組資料無效:%(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "金鑰組名稱包含不安全的字元"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "金鑰組名稱必須是字串,並且長度必須介於 1 和 255 個字元之間"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "只有 vCenter 6.0 及更高版本中的限制才受支援"
diff --git a/nova/manager.py b/nova/manager.py
index 9c00401b96..df03305367 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -103,12 +103,15 @@ class Manager(PeriodicTasks, metaclass=ManagerMeta):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
- def init_host(self):
+ def init_host(self, service_ref):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
- is created.
+ is created, but if one already exists for this service, it is
+ provided.
Child classes should override this method.
+
+ :param service_ref: An objects.Service if one exists, else None.
"""
pass
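A minimal sketch of a manager adapted to the new init_host() signature, where the hook receives the pre-existing service record (if any) before a new one is created; the classes here are illustrative only::

    class ExampleManager:  # hypothetical subclass, not a real nova manager
        def init_host(self, service_ref):
            if service_ref is not None:
                # An earlier incarnation of this service exists; its fields
                # can be inspected before initialization completes.
                print('restarting service id=%s' % service_ref.id)
            else:
                print('first start, no existing service record')


    class FakeServiceRef:
        id = 42


    ExampleManager().init_host(FakeServiceRef())
    ExampleManager().init_host(None)
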
diff --git a/nova/network/model.py b/nova/network/model.py
index 5bd70837db..1260349bcd 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -122,20 +122,20 @@ VNIC_TYPE_REMOTE_MANAGED = "remote-managed"
# selected compute node.
VNIC_TYPES_SRIOV = (
VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP, VNIC_TYPE_DIRECT_PHYSICAL,
- VNIC_TYPE_VIRTIO_FORWARDER, VNIC_TYPE_VDPA, VNIC_TYPE_REMOTE_MANAGED)
+ VNIC_TYPE_VIRTIO_FORWARDER, VNIC_TYPE_VDPA, VNIC_TYPE_REMOTE_MANAGED
+)
# Define list of ports which are passthrough to the guest
# and need a special treatment on snapshot and suspend/resume
-VNIC_TYPES_DIRECT_PASSTHROUGH = (VNIC_TYPE_DIRECT,
- VNIC_TYPE_DIRECT_PHYSICAL,
- VNIC_TYPE_ACCELERATOR_DIRECT,
- VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL,
- VNIC_TYPE_REMOTE_MANAGED)
+VNIC_TYPES_DIRECT_PASSTHROUGH = (
+ VNIC_TYPE_DIRECT, VNIC_TYPE_DIRECT_PHYSICAL,
+ VNIC_TYPE_ACCELERATOR_DIRECT, VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL,
+ VNIC_TYPE_REMOTE_MANAGED, VNIC_TYPE_VDPA
+)
# Define list of ports which contains devices managed by cyborg.
VNIC_TYPES_ACCELERATOR = (
- VNIC_TYPE_ACCELERATOR_DIRECT,
- VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL
+ VNIC_TYPE_ACCELERATOR_DIRECT, VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL
)
# Constants for the 'vif_model' values
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 5d00064259..27e7d06455 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -43,7 +43,6 @@ from nova.network import constants
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
-from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
@@ -300,7 +299,7 @@ class API:
self.last_neutron_extension_sync = None
self.extensions = {}
self.pci_whitelist = pci_whitelist.Whitelist(
- CONF.pci.passthrough_whitelist)
+ CONF.pci.device_spec)
def _update_port_with_migration_profile(
self, instance, port_id, port_profile, admin_client):
@@ -1631,14 +1630,13 @@ class API:
pci_request_id cannot be found on the instance.
"""
if pci_request_id:
- pci_devices = pci_manager.get_instance_pci_devs(
- instance, pci_request_id)
+ pci_devices = instance.get_pci_devices(request_id=pci_request_id)
if not pci_devices:
# The pci_request_id likely won't mean much except for tracing
# through the logs since it is generated per request.
LOG.error('Unable to find PCI device using PCI request ID in '
'list of claimed instance PCI devices: %s. Is the '
- '[pci]/passthrough_whitelist configuration correct?',
+ '[pci]device_spec configuration correct?',
# Convert to a primitive list to stringify it.
list(instance.pci_devices), instance=instance)
raise exception.PciDeviceNotFound(
@@ -1662,8 +1660,7 @@ class API:
Currently this is done only for PF passthrough.
"""
if pci_request_id is not None:
- pci_devs = pci_manager.get_instance_pci_devs(
- instance, pci_request_id)
+ pci_devs = instance.get_pci_devices(request_id=pci_request_id)
if len(pci_devs) != 1:
# NOTE(ndipanov): We shouldn't ever get here since
# InstancePCIRequest instances built from network requests
@@ -3359,6 +3356,25 @@ class API:
delegate_create=True,
)
+ def _log_error_if_vnic_type_changed(
+ self, port_id, old_vnic_type, new_vnic_type, instance
+ ):
+ if old_vnic_type and old_vnic_type != new_vnic_type:
+ LOG.error(
+ 'The vnic_type of the bound port %s has '
+ 'been changed in neutron from "%s" to '
+ '"%s". Changing vnic_type of a bound port '
+ 'is not supported by Nova. To avoid '
+ 'breaking the connectivity of the instance '
+ 'please change the port vnic_type back to '
+ '"%s".',
+ port_id,
+ old_vnic_type,
+ new_vnic_type,
+ old_vnic_type,
+ instance=instance
+ )
+
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
@@ -3432,6 +3448,12 @@ class API:
preexisting_port_ids)
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
+ self._log_error_if_vnic_type_changed(
+ vif['id'],
+ vif['vnic_type'],
+ refreshed_vif['vnic_type'],
+ instance,
+ )
# Update the existing entry.
nw_info[index] = refreshed_vif
LOG.debug('Updated VIF entry in instance network '
@@ -3481,6 +3503,7 @@ class API:
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids, client)
+ old_nw_info = instance.get_network_info()
nw_info = network_model.NetworkInfo()
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
@@ -3488,6 +3511,14 @@ class API:
vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
+ for old_vif in old_nw_info:
+ if old_vif['id'] == port_id:
+ self._log_error_if_vnic_type_changed(
+ port_id,
+ old_vif['vnic_type'],
+ vif['vnic_type'],
+ instance,
+ )
nw_info.append(vif)
elif nw_info_refresh:
LOG.info('Port %s from network info_cache is no '
diff --git a/nova/notifications/objects/image.py b/nova/notifications/objects/image.py
index b4852fc4e7..01c86d1cb0 100644
--- a/nova/notifications/objects/image.py
+++ b/nova/notifications/objects/image.py
@@ -126,7 +126,11 @@ class ImageMetaPropsPayload(base.NotificationPayloadBase):
# Version 1.7: Added 'hw_input_bus' field
# Version 1.8: Added 'bochs' as an option to 'hw_video_model'
# Version 1.9: Added 'hw_emulation_architecture' field
- VERSION = '1.9'
+ # Version 1.10: Added 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' fields
+ # Version 1.11: Added 'hw_locked_memory' field
+ # Version 1.12: Added 'hw_viommu_model' field
+ VERSION = '1.12'
SCHEMA = {
k: ('image_meta_props', k) for k in image_meta.ImageMetaProps.fields}
diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py
index 97199cf17a..82ce1c6806 100644
--- a/nova/objects/block_device.py
+++ b/nova/objects/block_device.py
@@ -67,7 +67,9 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
# Version 1.18: Added attachment_id
# Version 1.19: Added uuid
# Version 1.20: Added volume_type
- VERSION = '1.20'
+ # Version 1.21: Added encrypted, encryption_secret_uuid, encryption_format
+ # and encryption_options
+ VERSION = '1.21'
fields = {
'id': fields.IntegerField(),
@@ -93,10 +95,20 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
'attachment_id': fields.UUIDField(nullable=True),
# volume_type field can be a volume type name or ID(UUID).
'volume_type': fields.StringField(nullable=True),
+ 'encrypted': fields.BooleanField(default=False),
+ 'encryption_secret_uuid': fields.UUIDField(nullable=True),
+ 'encryption_format': fields.BlockDeviceEncryptionFormatTypeField(
+ nullable=True),
+ 'encryption_options': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 21):
+ primitive.pop('encrypted', None)
+ primitive.pop('encryption_secret_uuid', None)
+ primitive.pop('encryption_format', None)
+ primitive.pop('encryption_options', None)
if target_version < (1, 20) and 'volume_type' in primitive:
del primitive['volume_type']
if target_version < (1, 19) and 'uuid' in primitive:
@@ -308,26 +320,38 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
def is_image(self):
return self.source_type == fields.BlockDeviceSourceType.IMAGE
+ @property
+ def is_local(self):
+ return (self.destination_type ==
+ fields.BlockDeviceDestinationType.LOCAL)
+
def get_image_mapping(self):
return block_device.BlockDeviceDict(self).get_image_mapping()
def obj_load_attr(self, attrname):
- if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
- raise exception.ObjectActionError(
- action='obj_load_attr',
- reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
-
- LOG.debug("Lazy-loading '%(attr)s' on %(name)s using uuid %(uuid)s",
- {'attr': attrname,
- 'name': self.obj_name(),
- 'uuid': self.instance_uuid,
- })
- self.instance = objects.Instance.get_by_uuid(self._context,
- self.instance_uuid)
- self.obj_reset_changes(fields=['instance'])
+ if attrname == 'encrypted':
+ # We attempt to load this if we're creating a BDM object during an
+ # attach volume request, for example. Use the default in that case.
+ self.obj_set_defaults(attrname)
+ elif attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
+ raise exception.ObjectActionError(
+ action='obj_load_attr',
+ reason='attribute %s not lazy-loadable' % attrname)
+ else:
+ LOG.debug(
+ "Lazy-loading '%(attr)s' on %(name)s using uuid %(uuid)s",
+ {
+ 'attr': attrname,
+ 'name': self.obj_name(),
+ 'uuid': self.instance_uuid,
+ }
+ )
+ self.instance = objects.Instance.get_by_uuid(self._context,
+ self.instance_uuid)
+ self.obj_reset_changes(fields=['instance'])
@base.NovaObjectRegistry.register
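
Reviewer note: as a rough standalone illustration (not the oslo.versionedobjects API itself, and the helper name is made up) of what the new 1.21 branch in obj_make_compatible does when a BDM is backported to an older consumer:

    def make_compatible(primitive, target_version):
        """Drop fields the target version does not know about (sketch only)."""
        target = tuple(int(p) for p in target_version.split('.'))
        if target < (1, 21):
            for key in ('encrypted', 'encryption_secret_uuid',
                        'encryption_format', 'encryption_options'):
                primitive.pop(key, None)
        if target < (1, 20):
            primitive.pop('volume_type', None)
        return primitive


    bdm = {'encrypted': True, 'encryption_format': 'luks', 'volume_type': 'fast'}
    print(make_compatible(dict(bdm), '1.19'))  # {} - every newer field dropped
    print(make_compatible(dict(bdm), '1.21'))  # unchanged
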
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 60c2be71cd..dfc1b2ae28 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -339,7 +340,12 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
- db_compute = db.compute_node_create(self._context, updates)
+ try:
+ db_compute = db.compute_node_create(self._context, updates)
+ except db_exc.DBDuplicateEntry:
+ target = 'compute node %s:%s' % (updates['hypervisor_hostname'],
+ updates['uuid'])
+ raise exception.DuplicateRecord(target=target)
self._from_db_object(self._context, self, db_compute)
@base.remotable
@@ -388,8 +394,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# The uuid field is read-only so it should only be set when
# creating the compute node record for the first time. Ignore
# it otherwise.
- if key == 'uuid' and 'uuid' in self:
- continue
+ if (key == 'uuid' and 'uuid' in self and
+ resources[key] != self.uuid):
+ raise exception.InvalidNodeConfiguration(
+ reason='Attempt to overwrite node %s with %s!' % (
+ self.uuid, resources[key]))
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
diff --git a/nova/objects/external_event.py b/nova/objects/external_event.py
index b1acfc4aa0..e17008dade 100644
--- a/nova/objects/external_event.py
+++ b/nova/objects/external_event.py
@@ -33,6 +33,9 @@ EVENT_NAMES = [
# Accelerator Request got bound, tag is ARQ uuid.
# Sent when an ARQ for an instance has been bound or failed to bind.
'accelerator-request-bound',
+
+ # re-image operation has completed from cinder side
+ 'volume-reimaged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
@@ -50,7 +53,8 @@ class InstanceExternalEvent(obj_base.NovaObject):
# Version 1.2: adds volume-extended event
# Version 1.3: adds power-update event
# Version 1.4: adds accelerator-request-bound event
- VERSION = '1.4'
+ # Version 1.5: adds volume-reimaged event
+ VERSION = '1.5'
fields = {
'instance_uuid': fields.UUIDField(),
diff --git a/nova/objects/fields.py b/nova/objects/fields.py
index d8cb10f700..cae1ea4a4d 100644
--- a/nova/objects/fields.py
+++ b/nova/objects/fields.py
@@ -260,6 +260,14 @@ class BlockDeviceType(BaseNovaEnum):
ALL = (CDROM, DISK, FLOPPY, FS, LUN)
+class BlockDeviceEncryptionFormatType(BaseNovaEnum):
+ PLAIN = 'plain'
+ LUKS = 'luks'
+ LUKSv2 = 'luksv2'
+
+ ALL = (PLAIN, LUKS, LUKSv2)
+
+
class ConfigDrivePolicy(BaseNovaEnum):
OPTIONAL = "optional"
MANDATORY = "mandatory"
@@ -608,6 +616,16 @@ class VIFModel(BaseNovaEnum):
return super(VIFModel, self).coerce(obj, attr, value)
+class VIOMMUModel(BaseNovaEnum):
+
+ INTEL = 'intel'
+ SMMUV3 = 'smmuv3'
+ VIRTIO = 'virtio'
+ AUTO = 'auto'
+
+ ALL = (INTEL, SMMUV3, VIRTIO, AUTO)
+
+
class VMMode(BaseNovaEnum):
"""Represents possible vm modes for instances.
@@ -1197,6 +1215,10 @@ class BlockDeviceTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceType()
+class BlockDeviceEncryptionFormatTypeField(BaseEnumField):
+ AUTO_TYPE = BlockDeviceEncryptionFormatType()
+
+
class ConfigDrivePolicyField(BaseEnumField):
AUTO_TYPE = ConfigDrivePolicy()
@@ -1289,6 +1311,10 @@ class VIFModelField(BaseEnumField):
AUTO_TYPE = VIFModel()
+class VIOMMUModelField(BaseEnumField):
+ AUTO_TYPE = VIOMMUModel()
+
+
class VMModeField(BaseEnumField):
AUTO_TYPE = VMMode()
@@ -1353,6 +1379,14 @@ class InstancePowerStateField(BaseEnumField):
AUTO_TYPE = InstancePowerState()
+class NetworkModelField(AutoTypedField):
+ AUTO_TYPE = NetworkModel()
+
+
+class NetworkVIFModelField(AutoTypedField):
+ AUTO_TYPE = NetworkVIFModel()
+
+
class ListOfListsOfStringsField(AutoTypedField):
AUTO_TYPE = List(List(fields.String()))
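
Reviewer note: a hedged, standalone sketch of the value checking the new enum fields provide (the real BaseNovaEnum machinery is richer; the helper below is illustrative only):

    BLOCK_DEVICE_ENCRYPTION_FORMATS = ('plain', 'luks', 'luksv2')
    VIOMMU_MODELS = ('intel', 'smmuv3', 'virtio', 'auto')


    def coerce(value, allowed, field_name):
        """Mimic enum-field coercion: accept only whitelisted values."""
        if value not in allowed:
            raise ValueError('%r is not a valid %s' % (value, field_name))
        return value


    print(coerce('luks', BLOCK_DEVICE_ENCRYPTION_FORMATS, 'encryption format'))
    print(coerce('virtio', VIOMMU_MODELS, 'vIOMMU model'))
    # coerce('qcow2', BLOCK_DEVICE_ENCRYPTION_FORMATS, ...) raises ValueError
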
diff --git a/nova/objects/image_meta.py b/nova/objects/image_meta.py
index bd8ec69ab4..7927ad2575 100644
--- a/nova/objects/image_meta.py
+++ b/nova/objects/image_meta.py
@@ -188,14 +188,25 @@ class ImageMetaProps(base.NovaObject):
# Version 1.29: Added 'hw_input_bus' field
# Version 1.30: Added 'bochs' as an option to 'hw_video_model'
# Version 1.31: Added 'hw_emulation_architecture' field
+ # Version 1.32: Added 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' fields
+ # Version 1.33: Added 'hw_locked_memory' field
+ # Version 1.34: Added 'hw_viommu_model' field
# NOTE(efried): When bumping this version, the version of
# ImageMetaPropsPayload must also be bumped. See its docstring for details.
- VERSION = '1.31'
+ VERSION = '1.34'
def obj_make_compatible(self, primitive, target_version):
super(ImageMetaProps, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 34):
+ primitive.pop('hw_viommu_model', None)
+ if target_version < (1, 33):
+ primitive.pop('hw_locked_memory', None)
+ if target_version < (1, 32):
+ primitive.pop('hw_ephemeral_encryption', None)
+ primitive.pop('hw_ephemeral_encryption_format', None)
if target_version < (1, 31):
primitive.pop('hw_emulation_architecture', None)
if target_version < (1, 30):
@@ -363,6 +374,10 @@ class ImageMetaProps(base.NovaObject):
# image with a network boot image
'hw_ipxe_boot': fields.FlexibleBooleanField(),
+ # string - make sure ``locked`` element is present in the
+ # ``memoryBacking``.
+ 'hw_locked_memory': fields.FlexibleBooleanField(),
+
# There are sooooooooooo many possible machine types in
# QEMU - several new ones with each new release - that it
# is not practical to enumerate them all. So we use a free
@@ -434,6 +449,9 @@ class ImageMetaProps(base.NovaObject):
# name of a NIC device model eg virtio, e1000, rtl8139
'hw_vif_model': fields.VIFModelField(),
+ # name of IOMMU device model eg virtio, intel, smmuv3, or auto
+ 'hw_viommu_model': fields.VIOMMUModelField(),
+
# "xen" vs "hvm"
'hw_vm_mode': fields.VMModeField(),
@@ -449,6 +467,12 @@ class ImageMetaProps(base.NovaObject):
# version of emulated TPM to use.
'hw_tpm_version': fields.TPMVersionField(),
+ # boolean - if true will enable ephemeral encryption for instance
+ 'hw_ephemeral_encryption': fields.FlexibleBooleanField(),
+ # encryption format to be used when ephemeral encryption is enabled
+ 'hw_ephemeral_encryption_format':
+ fields.BlockDeviceEncryptionFormatTypeField(),
+
# if true download using bittorrent
'img_bittorrent': fields.FlexibleBooleanField(),
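
Reviewer note: the new properties would typically arrive as Glance image metadata. A hedged example of what such metadata might look like (string values are how Glance stores them; whether a deployment honours them depends on the virt driver and config):

    # Hypothetical Glance image properties exercising the new fields
    image_properties = {
        'hw_ephemeral_encryption': 'true',         # FlexibleBooleanField
        'hw_ephemeral_encryption_format': 'luks',  # plain | luks | luksv2
        'hw_locked_memory': 'true',                # lock guest memory pages
        'hw_viommu_model': 'virtio',               # intel | smmuv3 | virtio | auto
    }
    print(image_properties)
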
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index e99762d277..fed1a7c58b 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -13,6 +13,7 @@
# under the License.
import contextlib
+import typing as ty
from oslo_config import cfg
from oslo_db import exception as db_exc
@@ -1226,6 +1227,46 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
pci_req for pci_req in self.pci_requests.requests
if pci_req.request_id != pci_device.request_id]
+ def get_pci_devices(
+ self,
+ source: ty.Optional[int] = None,
+ request_id: ty.Optional[str] = None,
+ ) -> ty.List["objects.PciDevice"]:
+ """Return the PCI devices allocated to the instance
+
+ :param source: Filter by source. It can be
+ InstancePCIRequest.FLAVOR_ALIAS or InstancePCIRequest.NEUTRON_PORT
+        or None. None means return devices from both types of requests.
+ :param request_id: Filter by PciDevice.request_id. None means do not
+ filter by request_id.
+ :return: a list of matching PciDevice objects
+ """
+ if not self.pci_devices:
+ # return early to avoid an extra lazy load on self.pci_requests
+ # if there are no devices allocated to be filtered
+ return []
+
+ devs = self.pci_devices.objects
+
+ if request_id is not None:
+ devs = [dev for dev in devs if dev.request_id == request_id]
+
+ if source is not None:
+ # NOTE(gibi): this happens to work for the old requests when the
+ # request has request_id None and therefore the device allocated
+ # due to that request has request_id None too, so they will be
+ # mapped via the None key.
+ req_id_to_req = {
+ req.request_id: req for req in self.pci_requests.requests
+ }
+ devs = [
+ dev
+ for dev in devs
+ if (req_id_to_req[dev.request_id].source == source)
+ ]
+
+ return devs
+
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
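
Reviewer note: a standalone sketch of the filtering Instance.get_pci_devices() performs (dataclasses stand in for the nova objects; the real FLAVOR_ALIAS/NEUTRON_PORT constants are integers on InstancePCIRequest, string markers are used here for readability):

    from dataclasses import dataclass
    from typing import List, Optional

    FLAVOR_ALIAS, NEUTRON_PORT = 'flavor-alias', 'neutron-port'


    @dataclass
    class PciRequest:
        request_id: Optional[str]
        source: str


    @dataclass
    class PciDevice:
        address: str
        request_id: Optional[str]


    def get_pci_devices(devices: List[PciDevice], requests: List[PciRequest],
                        source=None, request_id=None) -> List[PciDevice]:
        devs = list(devices)
        if request_id is not None:
            devs = [d for d in devs if d.request_id == request_id]
        if source is not None:
            req_by_id = {r.request_id: r for r in requests}
            devs = [d for d in devs if req_by_id[d.request_id].source == source]
        return devs


    requests = [PciRequest('req-1', FLAVOR_ALIAS), PciRequest('req-2', NEUTRON_PORT)]
    devices = [PciDevice('0000:81:00.1', 'req-1'), PciDevice('0000:81:00.2', 'req-2')]
    print(get_pci_devices(devices, requests, source=NEUTRON_PORT))
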
diff --git a/nova/objects/instance_info_cache.py b/nova/objects/instance_info_cache.py
index 3ab430baa1..506eb897c1 100644
--- a/nova/objects/instance_info_cache.py
+++ b/nova/objects/instance_info_cache.py
@@ -36,8 +36,8 @@ class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject):
fields = {
'instance_uuid': fields.UUIDField(),
- 'network_info': fields.Field(fields.NetworkModel(), nullable=True),
- }
+ 'network_info': fields.NetworkModelField(nullable=True),
+ }
@staticmethod
def _from_db_object(context, info_cache, db_obj):
diff --git a/nova/objects/migrate_data.py b/nova/objects/migrate_data.py
index 54ba1a3a1b..299f46d03b 100644
--- a/nova/objects/migrate_data.py
+++ b/nova/objects/migrate_data.py
@@ -55,7 +55,7 @@ class VIFMigrateData(obj_base.NovaObject):
# destination host is configured for all vif types. See the note in
# the libvirt driver here: https://review.opendev.org/#/c/551370/
# 29/nova/virt/libvirt/driver.py@7036
- 'source_vif': fields.Field(fields.NetworkVIFModel()),
+ 'source_vif': fields.NetworkVIFModelField(),
}
@property
diff --git a/nova/objects/migration.py b/nova/objects/migration.py
index 7e340ceb78..6f5f217b80 100644
--- a/nova/objects/migration.py
+++ b/nova/objects/migration.py
@@ -215,6 +215,10 @@ class Migration(base.NovaPersistentObject, base.NovaObject):
def is_resize(self):
return self.migration_type == fields.MigrationType.RESIZE
+ @property
+ def is_same_host_resize(self):
+ return self.is_resize and self.source_node == self.dest_node
+
@base.NovaObjectRegistry.register
class MigrationList(base.ObjectListBase, base.NovaObject):
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index c17c963e77..a4ca77edf6 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -14,12 +14,15 @@
import copy
import itertools
+import typing as ty
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
+from nova.compute import pci_placement_translator
+import nova.conf
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova import exception
@@ -28,6 +31,7 @@ from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
@@ -473,6 +477,113 @@ class RequestSpec(base.NovaObject):
filt_props['requested_destination'] = self.requested_destination
return filt_props
+ @staticmethod
+ def _rc_from_request(spec: ty.Dict[str, ty.Any]) -> str:
+ return pci_placement_translator.get_resource_class(
+ spec.get("resource_class"),
+ spec.get("vendor_id"),
+ spec.get("product_id"),
+ )
+
+ @staticmethod
+ def _traits_from_request(spec: ty.Dict[str, ty.Any]) -> ty.Set[str]:
+ return pci_placement_translator.get_traits(spec.get("traits", ""))
+
+ def generate_request_groups_from_pci_requests(self):
+ if not CONF.filter_scheduler.pci_in_placement:
+ return False
+
+ for pci_request in self.pci_requests.requests:
+ if pci_request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): Handle neutron based PCI requests here in a later
+ # cycle.
+ continue
+
+ if len(pci_request.spec) != 1:
+ # We are instantiating InstancePCIRequest objects with spec in
+ # two cases:
+ # 1) when a neutron port is translated to InstancePCIRequest
+ # object in
+ # nova.network.neutron.API.create_resource_requests
+ # 2) when the pci_passthrough:alias flavor extra_spec is
+ # translated to InstancePCIRequest objects in
+ # nova.pci.request._get_alias_from_config which enforces the
+ # json schema defined in nova.pci.request.
+ #
+ # In both cases only a single dict is added to the spec list.
+ # If we ever want to add support for multiple specs per request
+ # then we have to solve the issue that each spec can request a
+ # different resource class from placement. The only place in
+ # nova that currently handles multiple specs per request is
+ # nova.pci.utils.pci_device_prop_match() and it considers them
+ # as alternatives. So specs with different resource classes
+ # would mean alternative resource_class requests. This cannot
+ # be expressed today in the allocation_candidate query towards
+ # placement.
+ raise ValueError(
+ "PCI tracking in placement does not support multiple "
+ "specs per PCI request"
+ )
+
+ spec = pci_request.spec[0]
+
+ # The goal is to translate InstancePCIRequest to RequestGroup. Each
+ # InstancePCIRequest can be fulfilled from the whole RP tree. And
+ # a flavor based InstancePCIRequest might request more than one
+ # device (if count > 1) and those devices still need to be placed
+ # independently to RPs. So we could have two options to translate
+ # an InstancePCIRequest object to RequestGroup objects:
+        # 1) put all the requested resources from every
+ # InstancePCIRequest to the unsuffixed RequestGroup.
+ # 2) generate a separate RequestGroup for each individual device
+ # request
+ #
+ # While #1) feels simpler it has a big downside. The unsuffixed
+ # group will have a bulk request group resource provider mapping
+ # returned from placement. So there would be no easy way to later
+ # untangle which InstancePCIRequest is fulfilled by which RP, and
+ # therefore which PCI device should be used to allocate a specific
+ # device on the hypervisor during the PCI claim. Note that there
+ # could be multiple PF RPs providing the same type of resources but
+ # still we need to make sure that if a resource is allocated in
+ # placement from a specific RP (representing a physical device)
+ # then the PCI claim should consume resources from the same
+ # physical device.
+ #
+ # So we need at least a separate RequestGroup per
+        # InstancePCIRequest. However, for an InstancePCIRequest(count=2)
+        # that would mean a RequestGroup(RC:2), which would mean both
+        # resources should come from the same RP in placement. This is
+        # impossible for PF or PCI type requests and overly restrictive for
+        # VF type requests. Therefore we need to generate one RequestGroup
+ # per requested device. So for InstancePCIRequest(count=2) we need
+ # to generate two separate RequestGroup(RC:1) objects.
+
+ # NOTE(gibi): If we have count=2 requests then the multiple
+ # RequestGroup split below only works if group_policy is set to
+ # none as group_policy=isolate would prevent allocating two VFs
+ # from the same PF. Fortunately
+ # nova.scheduler.utils.resources_from_request_spec() already
+ # defaults group_policy to none if it is not specified in the
+ # flavor and there are multiple RequestGroups in the RequestSpec.
+
+ for i in range(pci_request.count):
+ rg = objects.RequestGroup(
+ use_same_provider=True,
+ # we need to generate a unique ID for each group, so we use
+ # a counter
+ requester_id=f"{pci_request.request_id}-{i}",
+ # as we split count >= 2 requests to independent groups
+ # each group will have a resource request of one
+ resources={
+ self._rc_from_request(spec): 1
+ },
+ required_traits=self._traits_from_request(spec),
+ # TODO(gibi): later we can add support for complex trait
+ # queries here including forbidden_traits.
+ )
+ self.requested_resources.append(rg)
+
@classmethod
def from_components(
cls, context, instance_uuid, image, flavor,
@@ -539,6 +650,8 @@ class RequestSpec(base.NovaObject):
if port_resource_requests:
spec_obj.requested_resources.extend(port_resource_requests)
+ spec_obj.generate_request_groups_from_pci_requests()
+
# NOTE(gibi): later the scheduler adds more request level params but
# never overrides existing ones so we can initialize them here.
if request_level_params is None:
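
Reviewer note: to make the long comment in generate_request_groups_from_pci_requests() above concrete, a minimal standalone sketch (hypothetical helper and values, plain dicts instead of RequestGroup objects) of how a count=2 request is split into two single-device groups:

    def groups_from_pci_request(request_id, count, resource_class, traits):
        """Split one PCI request into `count` single-device request groups."""
        return [
            {
                'requester_id': f'{request_id}-{i}',   # unique suffix per device
                'use_same_provider': True,
                'resources': {resource_class: 1},      # one device per group
                'required_traits': set(traits),
            }
            for i in range(count)
        ]


    groups = groups_from_pci_request('req-1', 2, 'CUSTOM_PCI_8086_1572',
                                     {'CUSTOM_FAST'})
    for group in groups:
        print(group['requester_id'], group['resources'])
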
diff --git a/nova/objects/service.py b/nova/objects/service.py
index bc35132565..0ed443ef17 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 61
+SERVICE_VERSION = 66
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -216,16 +216,36 @@ SERVICE_VERSION_HISTORY = (
# Version 61: Compute RPC v6.0:
# Add support for remotely-managed ports (vnic-type 'remote-managed')
{'compute_rpc': '6.0'},
+ # Version 62: Compute RPC v6.0:
+ # Add support for VDPA port attach/detach
+ {'compute_rpc': '6.0'},
+ # Version 63: Compute RPC v6.0:
+ # Add support for VDPA hotplug live migration and suspend/resume
+ {'compute_rpc': '6.0'},
+ # Version 64: Compute RPC v6.1:
+ # Add reimage_boot_volume parameter to rebuild_instance()
+ {'compute_rpc': '6.1'},
+ # Version 65: Compute RPC v6.1:
+ # Added stable local node identity
+ {'compute_rpc': '6.1'},
+ # Version 66: Compute RPC v6.2:
+ # Add target_state parameter to rebuild_instance()
+ {'compute_rpc': '6.2'},
)
+# This is the version after which we can rely on having a persistent
+# local node identity for single-node systems.
+NODE_IDENTITY_VERSION = 65
+
# This is used to raise an error at service startup if older than N-1 computes
# are detected. Update this at the beginning of every release cycle to point to
# the smallest service version that was added in N-1.
-OLDEST_SUPPORTED_SERVICE_VERSION = 'Xena'
+OLDEST_SUPPORTED_SERVICE_VERSION = 'Yoga'
SERVICE_VERSION_ALIASES = {
'Victoria': 52,
'Wallaby': 54,
'Xena': 57,
+ 'Yoga': 61,
}
diff --git a/nova/pci/devspec.py b/nova/pci/devspec.py
index 1cefaed0cb..386005c8eb 100644
--- a/nova/pci/devspec.py
+++ b/nova/pci/devspec.py
@@ -12,6 +12,7 @@
# under the License.
import abc
+import copy
import re
import string
import typing as ty
@@ -41,7 +42,7 @@ PCISpecAddressType = ty.Union[ty.Dict[str, str], str]
class PciAddressSpec(metaclass=abc.ABCMeta):
"""Abstract class for all PCI address spec styles
- This class checks the address fields of the pci.passthrough_whitelist
+ This class checks the address fields of the pci.device_spec
"""
def __init__(self, pci_addr: str) -> None:
@@ -70,11 +71,11 @@ class PciAddressSpec(metaclass=abc.ABCMeta):
try:
v = int(a, 16)
except ValueError:
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("property %(property)s ('%(attr)s') does not parse "
"as a hex number.") % {'property': prop, 'attr': a})
if v > maxval:
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("property %(property)s (%(attr)s) is greater than "
"the maximum allowable value (%(max)X).") %
{'property': prop, 'attr': a, 'max': maxval})
@@ -195,19 +196,19 @@ class PciAddressRegexSpec(PciAddressSpec):
class WhitelistPciAddress(object):
"""Manages the address fields of the whitelist.
- This class checks the address fields of the pci.passthrough_whitelist
+ This class checks the address fields of the pci.device_spec
configuration option, validating the address fields.
Example configs:
| [pci]
- | passthrough_whitelist = {"address":"*:0a:00.*",
- | "physical_network":"physnet1"}
- | passthrough_whitelist = {"address": {"domain": ".*",
- "bus": "02",
- "slot": "01",
- "function": "[0-2]"},
- "physical_network":"net1"}
- | passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
+ | device_spec = {"address":"*:0a:00.*",
+ | "physical_network":"physnet1"}
+ | device_spec = {"address": {"domain": ".*",
+ "bus": "02",
+ "slot": "01",
+ "function": "[0-2]"},
+ "physical_network":"net1"}
+ | device_spec = {"vendor_id":"1137","product_id":"0071"}
"""
@@ -254,7 +255,7 @@ class WhitelistPciAddress(object):
# Try to match on the parent PCI address if the PciDeviceSpec is a
# PF (sriov is available) and the device to match is a VF. This
# makes it possible to specify the PCI address of a PF in the
- # pci.passthrough_whitelist to match any of its VFs' PCI addresses.
+ # pci.device_spec to match any of its VFs' PCI addresses.
if self.is_physical_function and pci_phys_addr:
pci_phys_addr_obj = PhysicalPciAddress(pci_phys_addr)
if self.pci_address_spec.match(pci_phys_addr_obj):
@@ -267,6 +268,10 @@ class WhitelistPciAddress(object):
class PciDeviceSpec(PciAddressSpec):
def __init__(self, dev_spec: ty.Dict[str, str]) -> None:
+ # stored for better error reporting
+ self.dev_spec_conf = copy.deepcopy(dev_spec)
+        # the non-tag fields (e.g. address, devname) will be removed by
+ # _init_dev_details
self.tags = dev_spec
self._init_dev_details()
@@ -341,7 +346,7 @@ class PciDeviceSpec(PciAddressSpec):
# to be a VF corresponding to the PF PCI address do not
# match the actual ones for this PF. This means that the
# whitelist is invalid.
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_('the specified VF vendor ID %(vendor_id)s and'
' product ID %(product_id)s do not match the'
' expected VF IDs based on the corresponding'
diff --git a/nova/pci/manager.py b/nova/pci/manager.py
index b2a6157102..af6d72521b 100644
--- a/nova/pci/manager.py
+++ b/nova/pci/manager.py
@@ -69,7 +69,7 @@ class PciDevTracker(object):
"""
self.stale: ty.Dict[str, objects.PciDevice] = {}
self.node_id: str = compute_node.id
- self.dev_filter = whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ self.dev_filter = whitelist.Whitelist(CONF.pci.device_spec)
numa_topology = compute_node.numa_topology
if numa_topology:
# For legacy reasons, the NUMATopology is stored as a JSON blob.
@@ -133,7 +133,7 @@ class PciDevTracker(object):
try:
if self.dev_filter.device_assignable(dev):
devices.append(dev)
- except exception.PciConfigInvalidWhitelist as e:
+ except exception.PciConfigInvalidSpec as e:
# The raised exception is misleading as the problem is not with
# the whitelist config but with the host PCI device reported by
# libvirt. The code that matches the host PCI device to the
@@ -164,7 +164,7 @@ class PciDevTracker(object):
# parse whitelist config with
# devspec.PciAddressSpec._set_pci_dev_info()
str(e).replace(
- 'Invalid PCI devices Whitelist config:', 'The'))
+ 'Invalid [pci]device_spec config:', 'The'))
self._set_hvdevs(devices)
@@ -224,7 +224,7 @@ class PciDevTracker(object):
LOG.warning("Unable to remove device with status "
"'%(status)s' and ownership %(instance_uuid)s "
"because of %(pci_exception)s. "
- "Check your [pci]passthrough_whitelist "
+ "Check your [pci]device_spec "
"configuration to make sure this allocated "
"device is whitelisted. If you have removed "
"the device from the whitelist intentionally "
@@ -480,24 +480,3 @@ class PciDevTracker(object):
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
-
-
-def get_instance_pci_devs(
- inst: 'objects.Instance', request_id: str = None,
-) -> ty.List['objects.PciDevice']:
- """Get the devices allocated to one or all requests for an instance.
-
- - For generic PCI request, the request id is None.
- - For sr-iov networking, the request id is a valid uuid
- - There are a couple of cases where all the PCI devices allocated to an
- instance need to be returned. Refer to libvirt driver that handles
- soft_reboot and hard_boot of 'xen' instances.
- """
- pci_devices = inst.pci_devices
- if pci_devices is None:
- return []
-
- return [
- device for device in pci_devices if
- device.request_id == request_id or request_id == 'all'
- ]
diff --git a/nova/pci/request.py b/nova/pci/request.py
index d179d36cd9..27ada6c045 100644
--- a/nova/pci/request.py
+++ b/nova/pci/request.py
@@ -43,6 +43,7 @@ import typing as ty
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
import nova.conf
from nova import context as ctx
@@ -105,6 +106,12 @@ _ALIAS_SCHEMA = {
"type": "string",
"enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
},
+ "resource_class": {
+ "type": "string",
+ },
+ "traits": {
+ "type": "string",
+ },
},
"required": ["name"],
}
@@ -113,7 +120,7 @@ _ALIAS_SCHEMA = {
def _get_alias_from_config() -> Alias:
"""Parse and validate PCI aliases from the nova config.
- :returns: A dictionary where the keys are device names and the values are
+ :returns: A dictionary where the keys are alias names and the values are
tuples of form ``(numa_policy, specs)``. ``numa_policy`` describes the
required NUMA affinity of the device(s), while ``specs`` is a list of
PCI device specs.
@@ -183,7 +190,9 @@ def _translate_alias_to_requests(
count=int(count),
spec=spec,
alias_name=name,
- numa_policy=policy))
+ numa_policy=policy,
+ request_id=uuidutils.generate_uuid(),
+ ))
return pci_requests
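
Reviewer note: a hedged example of a flavor alias exercising the new resource_class and traits schema keys (the exact accepted values depend on the deployment's placement inventory; this is one possible shape, not the only valid one):

    import json

    # Hypothetical alias using the new keys; resource_class / traits are
    # matched in placement rather than against host vendor/product IDs.
    alias = {
        'name': 'gpu',
        'resource_class': 'CUSTOM_GPU',
        'traits': 'CUSTOM_A100,CUSTOM_BIG_VRAM',  # comma separated trait names
        'device_type': 'type-PF',
    }
    # nova.conf carries each alias as a JSON string under [pci]alias:
    print('[pci]\nalias = %s' % json.dumps(alias))
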
diff --git a/nova/pci/stats.py b/nova/pci/stats.py
index 6a53c43c78..5c5f7c669c 100644
--- a/nova/pci/stats.py
+++ b/nova/pci/stats.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
import copy
import typing as ty
@@ -64,6 +64,19 @@ class PciDeviceStats(object):
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
+ # these can be specified in the [pci]device_spec and can be requested via
+ # the PCI alias, but they are matched by the placement
+ # allocation_candidates query, so we can ignore them during pool creation
+ # and during filtering here
+ ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits']
+    # 'rp_uuids' is a metadata key in the spec that is matched specially
+    # in _filter_pools_based_on_placement_allocation, so it can be
+    # ignored in the general matching logic.
+ ignored_spec_tags += ['rp_uuids']
+    # 'rp_uuid' is a metadata key in the pool that is matched specially
+    # in _filter_pools_based_on_placement_allocation, so it can be
+    # ignored in the general matching logic.
+ ignored_pool_tags += ['rp_uuid']
def __init__(
self,
@@ -77,7 +90,7 @@ class PciDeviceStats(object):
)
self.pools.sort(key=lambda item: len(item))
self.dev_filter = dev_filter or whitelist.Whitelist(
- CONF.pci.passthrough_whitelist)
+ CONF.pci.device_spec)
def _equal_properties(
self, dev: Pool, entry: Pool, matching_keys: ty.List[str],
@@ -134,8 +147,22 @@ class PciDeviceStats(object):
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
+
if tags:
- pool.update(tags)
+ pool.update(
+ {
+ k: v
+ for k, v in tags.items()
+ if k not in self.ignored_pool_tags
+ }
+ )
+ # NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a
+ # single RP and the scheduler allocates from a specific RP we need
+ # to split the pools by PCI or PF address. We can still keep
+ # the VFs from the same parent PF in a single pool though as they
+ # are equivalent from placement perspective.
+ pool['address'] = dev.parent_addr or dev.address
+
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
# by the virt driver.
@@ -224,6 +251,17 @@ class PciDeviceStats(object):
free_devs.extend(pool['devices'])
return free_devs
+ def _allocate_devs(
+ self, pool: Pool, num: int, request_id: str
+ ) -> ty.List["objects.PciDevice"]:
+ alloc_devices = []
+ for _ in range(num):
+ pci_dev = pool['devices'].pop()
+ self._handle_device_dependents(pci_dev)
+ pci_dev.request_id = request_id
+ alloc_devices.append(pci_dev)
+ return alloc_devices
+
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
@@ -235,7 +273,10 @@ class PciDeviceStats(object):
for request in pci_requests:
count = request.count
- pools = self._filter_pools(self.pools, request, numa_cells)
+ rp_uuids = self._get_rp_uuids_for_request(
+ request=request, provider_mapping=None)
+ pools = self._filter_pools(
+ self.pools, request, numa_cells, rp_uuids=rp_uuids)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
@@ -249,22 +290,31 @@ class PciDeviceStats(object):
"on the compute node semaphore.")
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
- return None
-
- for pool in pools:
- if pool['count'] >= count:
- num_alloc = count
- else:
- num_alloc = pool['count']
- count -= num_alloc
- pool['count'] -= num_alloc
- for d in range(num_alloc):
- pci_dev = pool['devices'].pop()
- self._handle_device_dependents(pci_dev)
- pci_dev.request_id = request.request_id
- alloc_devices.append(pci_dev)
- if count == 0:
- break
+ raise exception.PciDeviceRequestFailed(requests=pci_requests)
+
+ if not rp_uuids:
+ # if there is no placement allocation then we are free to
+ # consume from the pools in any order:
+ for pool in pools:
+ if pool['count'] >= count:
+ num_alloc = count
+ else:
+ num_alloc = pool['count']
+ count -= num_alloc
+ pool['count'] -= num_alloc
+ alloc_devices += self._allocate_devs(
+ pool, num_alloc, request.request_id)
+ if count == 0:
+ break
+ else:
+ # but if there is placement allocation then we have to follow
+ # it
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ alloc_devices += self._allocate_devs(
+ pool, count, request.request_id)
return alloc_devices
@@ -313,7 +363,15 @@ class PciDeviceStats(object):
:returns: A list of pools that can be used to support the request if
this is possible.
"""
- request_specs = request.spec
+
+ def ignore_keys(spec):
+ return {
+ k: v
+ for k, v in spec.items()
+ if k not in self.ignored_spec_tags
+ }
+
+ request_specs = [ignore_keys(spec) for spec in request.spec]
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
@@ -510,11 +568,52 @@ class PciDeviceStats(object):
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
+ def _filter_pools_based_on_placement_allocation(
+ self,
+ pools: ty.List[Pool],
+ request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
+ ) -> ty.List[Pool]:
+ if not rp_uuids:
+ # If there is no placement allocation then we don't need to filter
+            # by it. This could happen if the instance only has neutron port
+            # based InstancePCIRequests, as those currently do not have a
+            # placement allocation (except for QoS ports, but that is handled
+            # in a separate codepath), or if the
+            # [filter_scheduler]pci_in_placement configuration option is not
+            # enabled in the scheduler.
+ return pools
+
+ requested_dev_count_per_rp = collections.Counter(rp_uuids)
+ matching_pools = []
+ for pool in pools:
+ rp_uuid = pool.get('rp_uuid')
+ if rp_uuid is None:
+ # NOTE(gibi): As rp_uuids is not empty the scheduler allocated
+ # PCI resources on this host, so we know that
+ # [pci]report_in_placement is enabled on this host. But this
+                # pool has no RP mapping, which can only happen if the pool
+                # contains PCI devices with a physical_network tag, as those
+                # devices are not yet reported in placement. But if they are
+                # not reported then we can ignore them here too.
+ continue
+
+ if (
+ # the placement allocation contains this pool
+ rp_uuid in requested_dev_count_per_rp and
+ # the amount of dev allocated in placement can be consumed
+ # from the pool
+ pool["count"] >= requested_dev_count_per_rp[rp_uuid]
+ ):
+ matching_pools.append(pool)
+
+ return matching_pools
+
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
+ rp_uuids: ty.List[str],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
@@ -529,6 +628,9 @@ class PciDeviceStats(object):
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement, so here we only have to consider the pools matching
+            these RP uuids.
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
@@ -613,6 +715,19 @@ class PciDeviceStats(object):
before_count - after_count
)
+ # if there is placement allocation for the request then we have to
+ # remove the pools that are not in the placement allocation
+ before_count = after_count
+ pools = self._filter_pools_based_on_placement_allocation(
+ pools, request, rp_uuids)
+ after_count = sum([pool['count'] for pool in pools])
+ if after_count < before_count:
+ LOG.debug(
+ 'Dropped %d device(s) that are not part of the placement '
+ 'allocation',
+ before_count - after_count
+ )
+
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
@@ -622,6 +737,7 @@ class PciDeviceStats(object):
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
@@ -635,20 +751,38 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+        :param provider_mapping: A dict keyed by RequestGroup requester_id,
+            mapping to a list of resource provider UUIDs which provide
+            resources for that RequestGroup. If it is None then it signals
+            that the InstancePCIRequest objects already store a mapping per
+            request, i.e. we are called _after_ the scheduler made
+            allocations for this request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
"""
- # NOTE(yjiang5): this function has high possibility to fail,
- # so no exception should be triggered for performance reason.
- return all(
- self._filter_pools(self.pools, r, numa_cells) for r in requests
- )
+
+        # Try to apply the requests on a copy of the stats; if they apply
+        # cleanly then we know the requests are supported. We only apply them
+        # on a copy as we don't want to actually consume resources from the
+        # pools at this point; this is just a test during host filtering.
+        # Later the scheduler will call apply_requests to consume on the
+        # selected host. The compute will call consume_requests during the
+        # PCI claim to consume not just from the pools but also the PciDevice
+        # objects.
+ stats = copy.deepcopy(self)
+ try:
+ stats.apply_requests(requests, provider_mapping, numa_cells)
+ except exception.PciDeviceRequestFailed:
+ return False
+
+ return True
def _apply_request(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
@@ -662,6 +796,8 @@ class PciDeviceStats(object):
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
@@ -671,22 +807,77 @@ class PciDeviceStats(object):
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
- filtered_pools = self._filter_pools(pools, request, numa_cells)
+ filtered_pools = self._filter_pools(
+ pools, request, numa_cells, rp_uuids)
if not filtered_pools:
return False
- count = request.count
- for pool in filtered_pools:
- count = self._decrease_pool_count(pools, pool, count)
- if not count:
- break
+ if not rp_uuids:
+ # If there is no placement allocation for this request then we are
+ # free to consume from the filtered pools in any order
+ count = request.count
+ for pool in filtered_pools:
+ count = self._decrease_pool_count(pools, pool, count)
+ if not count:
+ break
+ else:
+ # but if there is placement allocation then we have to follow that
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in filtered_pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ if pool['count'] == 0:
+ pools.remove(pool)
return True
+ def _get_rp_uuids_for_request(
+ self,
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
+ request: 'objects.InstancePCIRequest'
+ ) -> ty.List[str]:
+ """Return the list of RP uuids that are fulfilling the request.
+
+        An RP will appear in the list as many times as the number of devices
+        that need to be allocated from that RP.
+ """
+
+ if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): support neutron based requests in a later cycle
+ # an empty list will signal that any PCI pool can be used for this
+ # request
+ return []
+
+ if not provider_mapping:
+ # NOTE(gibi): AFAIK specs is always a list of a single dict
+ # but the object is hard to change retroactively
+ rp_uuids = request.spec[0].get('rp_uuids')
+ if not rp_uuids:
+ # This can happen if [filter_scheduler]pci_in_placement is not
+ # enabled yet
+ # An empty list will signal that any PCI pool can be used for
+ # this request
+ return []
+
+ # TODO(gibi): this is baaad but spec is a dict of string so
+ # the list is serialized
+ return rp_uuids.split(',')
+
+ # NOTE(gibi): the PCI prefilter generates RequestGroup suffixes from
+ # InstancePCIRequests in the form of {request_id}-{count_index}
+        # NOTE(gibi): a suffixed request group is always fulfilled from a
+        # single RP
+ return [
+ rp_uuids[0]
+ for group_id, rp_uuids in provider_mapping.items()
+ if group_id.startswith(request.request_id)
+ ]
+
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
@@ -700,15 +891,23 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+        :param provider_mapping: A dict keyed by RequestGroup requester_id,
+            mapping to a list of resource provider UUIDs which provide
+            resources for that RequestGroup. If it is None then it signals
+            that the InstancePCIRequest objects already store a mapping per
+            request, i.e. we are called _after_ the scheduler made
+            allocations for this request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
- if not all(
- self._apply_request(self.pools, r, numa_cells) for r in requests
- ):
- raise exception.PciDeviceRequestFailed(requests=requests)
+
+ for r in requests:
+ rp_uuids = self._get_rp_uuids_for_request(provider_mapping, r)
+
+ if not self._apply_request(self.pools, r, rp_uuids, numa_cells):
+ raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
@@ -746,3 +945,40 @@ class PciDeviceStats(object):
)
pools = self._filter_pools_for_spec(self.pools, dummy_req)
return bool(pools)
+
+ def populate_pools_metadata_from_assigned_devices(self):
+ """Populate the rp_uuid of each pool based on the rp_uuid of the
+ devices assigned to the pool. This can only be called from the compute
+        where devices are assigned to each pool. This should not be called
+        from the scheduler, as the device to pool assignment is not known
+        there.
+ """
+ # PciDevices are tracked in placement and flavor based PCI requests
+ # are scheduled and allocated in placement. To be able to correlate
+ # what is allocated in placement and what is consumed in nova we
+ # need to map device pools to RPs. We can do that as the PciDevice
+ # contains the RP UUID that represents it in placement.
+ # NOTE(gibi): We cannot do this when the device is originally added to
+        # the pool as the device -> placement translation, which creates the
+        # RPs, runs after all the devices are created and assigned to pools.
+ for pool in self.pools:
+ pool_rps = {
+ dev.extra_info.get("rp_uuid")
+ for dev in pool["devices"]
+ if "rp_uuid" in dev.extra_info
+ }
+ if len(pool_rps) >= 2:
+ # FIXME(gibi): Do we have a 1:1 pool - RP mapping even
+                # if two PFs provide very similar VFs?
+ raise ValueError(
+ "We have a pool %s connected to more than one RPs %s in "
+ "placement via devs %s" % (pool, pool_rps, pool["devices"])
+ )
+
+ if not pool_rps:
+ # this can happen if the nova-compute is upgraded to have the
+ # PCI in placement inventory handling code but
+ # [pci]report_in_placement is not turned on yet.
+ continue
+
+ if pool_rps: # now we know that it is a single RP
+ pool['rp_uuid'] = next(iter(pool_rps))
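
Reviewer note: a standalone sketch of the placement-driven consumption path added above; the placement allocation tells us exactly how many devices to take from the pool backing each resource provider (pool and RP names are made up):

    import collections

    # pools as built on the compute, each mapped to the RP that represents it
    pools = [
        {'rp_uuid': 'rp-pf1', 'count': 3},
        {'rp_uuid': 'rp-pf2', 'count': 2},
    ]
    # one list entry per device allocated in placement for this request
    rp_uuids = ['rp-pf1', 'rp-pf1', 'rp-pf2']

    requested_per_rp = collections.Counter(rp_uuids)
    for pool in pools:
        pool['count'] -= requested_per_rp[pool['rp_uuid']]

    print(pools)
    # [{'rp_uuid': 'rp-pf1', 'count': 1}, {'rp_uuid': 'rp-pf2', 'count': 1}]
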
diff --git a/nova/pci/whitelist.py b/nova/pci/whitelist.py
index 1e49971237..8862a0ef4f 100644
--- a/nova/pci/whitelist.py
+++ b/nova/pci/whitelist.py
@@ -44,7 +44,7 @@ class Whitelist(object):
:param whitelist_spec: A JSON string for a dictionary or list thereof.
Each dictionary specifies the pci device properties requirement.
- See the definition of ``passthrough_whitelist`` in
+ See the definition of ``device_spec`` in
``nova.conf.pci`` for details and examples.
"""
if whitelist_spec:
@@ -62,18 +62,18 @@ class Whitelist(object):
try:
dev_spec = jsonutils.loads(jsonspec)
except ValueError:
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("Invalid entry: '%s'") % jsonspec)
if isinstance(dev_spec, dict):
dev_spec = [dev_spec]
elif not isinstance(dev_spec, list):
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("Invalid entry: '%s'; "
"Expecting list or dict") % jsonspec)
for ds in dev_spec:
if not isinstance(ds, dict):
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("Invalid entry: '%s'; "
"Expecting dict") % ds)
diff --git a/nova/policies/admin_actions.py b/nova/policies/admin_actions.py
index 4db7d8e1c3..e07d66ee36 100644
--- a/nova/policies/admin_actions.py
+++ b/nova/policies/admin_actions.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-admin-actions:%s'
admin_actions_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'reset_state',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Reset the state of a given server",
operations=[
{
@@ -35,7 +35,7 @@ admin_actions_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'inject_network_info',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Inject network information into the server",
operations=[
{
diff --git a/nova/policies/admin_password.py b/nova/policies/admin_password.py
index 439966a9af..ad87aa7c96 100644
--- a/nova/policies/admin_password.py
+++ b/nova/policies/admin_password.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-admin-password'
admin_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Change the administrative password for a server",
operations=[
{
diff --git a/nova/policies/aggregates.py b/nova/policies/aggregates.py
index 73597f73eb..2775721699 100644
--- a/nova/policies/aggregates.py
+++ b/nova/policies/aggregates.py
@@ -33,7 +33,7 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'add_host',
check_str=base.ADMIN,
@@ -44,7 +44,7 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str=base.ADMIN,
@@ -55,7 +55,7 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'remove_host',
check_str=base.ADMIN,
@@ -66,7 +66,7 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
check_str=base.ADMIN,
@@ -77,7 +77,7 @@ aggregates_policies = [
'method': 'PUT'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
check_str=base.ADMIN,
@@ -88,7 +88,7 @@ aggregates_policies = [
'method': 'GET'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str=base.ADMIN,
@@ -99,7 +99,7 @@ aggregates_policies = [
'method': 'DELETE'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
check_str=base.ADMIN,
@@ -110,7 +110,7 @@ aggregates_policies = [
'method': 'GET'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=NEW_POLICY_ROOT % 'images',
check_str=base.ADMIN,
@@ -121,7 +121,7 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/assisted_volume_snapshots.py b/nova/policies/assisted_volume_snapshots.py
index 0630ee7a50..98a67a8e37 100644
--- a/nova/policies/assisted_volume_snapshots.py
+++ b/nova/policies/assisted_volume_snapshots.py
@@ -29,7 +29,7 @@ assisted_volume_snapshots_policies = [
# can call it with user having 'service' role (not having
# correct project_id). That is for phase-2 of RBAC goal and until
# then, we keep it open for all admin in any project. We cannot
- # default it to PROJECT_ADMIN which has the project_id in
+        # default it to a project-scoped admin rule, which has the project_id in
# check_str and will fail if cinder call it with other project_id.
check_str=base.ADMIN,
description="Create an assisted volume snapshot",
@@ -47,7 +47,7 @@ assisted_volume_snapshots_policies = [
# can call it with user having 'service' role (not having
# correct project_id). That is for phase-2 of RBAC goal and until
# then, we keep it open for all admin in any project. We cannot
- # default it to PROJECT_ADMIN which has the project_id in
+        # default it to a project-scoped admin rule, which has the project_id in
# check_str and will fail if cinder call it with other project_id.
check_str=base.ADMIN,
description="Delete an assisted volume snapshot",
diff --git a/nova/policies/attach_interfaces.py b/nova/policies/attach_interfaces.py
index eb365fd99d..b996e8ae59 100644
--- a/nova/policies/attach_interfaces.py
+++ b/nova/policies/attach_interfaces.py
@@ -37,7 +37,7 @@ DEPRECATED_INTERFACES_POLICY = policy.DeprecatedRule(
attach_interfaces_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List port interfaces attached to a server",
operations=[
{
@@ -49,7 +49,7 @@ attach_interfaces_policies = [
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a port interface attached to a server",
operations=[
{
@@ -61,7 +61,7 @@ attach_interfaces_policies = [
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Attach an interface to a server",
operations=[
{
@@ -73,7 +73,7 @@ attach_interfaces_policies = [
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Detach an interface from a server",
operations=[
{
diff --git a/nova/policies/availability_zone.py b/nova/policies/availability_zone.py
index de78dd864d..9a32c095b2 100644
--- a/nova/policies/availability_zone.py
+++ b/nova/policies/availability_zone.py
@@ -33,7 +33,7 @@ availability_zone_policies = [
'path': '/os-availability-zone'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'detail',
check_str=base.ADMIN,
@@ -45,7 +45,7 @@ availability_zone_policies = [
'path': '/os-availability-zone/detail'
}
],
- scope_types=['system'])
+ scope_types=['project'])
]
diff --git a/nova/policies/baremetal_nodes.py b/nova/policies/baremetal_nodes.py
index fdce0372b4..8fd66d57ba 100644
--- a/nova/policies/baremetal_nodes.py
+++ b/nova/policies/baremetal_nodes.py
@@ -49,7 +49,7 @@ These APIs are proxy calls to the Ironic service and are deprecated.
'path': '/os-baremetal-nodes'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_BAREMETAL_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
@@ -61,7 +61,7 @@ These APIs are proxy calls to the Ironic service and are deprecated.
'path': '/os-baremetal-nodes/{node_id}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_BAREMETAL_POLICY)
]
diff --git a/nova/policies/base.py b/nova/policies/base.py
index 2d60f4634a..ab0c319cdf 100644
--- a/nova/policies/base.py
+++ b/nova/policies/base.py
@@ -36,28 +36,26 @@ DEPRECATED_ADMIN_OR_OWNER_POLICY = policy.DeprecatedRule(
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'
)
-PROJECT_ADMIN = 'rule:project_admin_api'
+
+ADMIN = 'rule:context_is_admin'
PROJECT_MEMBER = 'rule:project_member_api'
PROJECT_READER = 'rule:project_reader_api'
+PROJECT_MEMBER_OR_ADMIN = 'rule:project_member_or_admin'
PROJECT_READER_OR_ADMIN = 'rule:project_reader_or_admin'
-ADMIN = 'rule:context_is_admin'
-# NOTE(gmann): Below is the mapping of new roles and scope_types
-# with legacy roles::
+# NOTE(gmann): Below is the mapping of new roles with legacy roles::
-# Legacy Rule | New Rules |Operation |scope_type|
-# -------------------+---------------------+----------------+-----------
-# |-> ADMIN |Global resource | [system]
-# RULE_ADMIN_API | |Write & Read |
-# |-> PROJECT_ADMIN |Project resource| [project]
-# | |Write |
-# ----------------------------------------------------------------------
-# |-> PROJECT_ADMIN |Project resource| [project]
-# | |Write |
-# |-> PROJECT_MEMBER |Project resource| [project]
-# RULE_ADMIN_OR_OWNER| |Write |
-# |-> PROJECT_READER |Project resource| [project]
-# | |Read |
+# Legacy Rule | New Rules |Operation |scope_type|
+# -------------------+---------------------------+----------------+-----------
+# RULE_ADMIN_API |-> ADMIN |Global resource | [project]
+# | |Write & Read |
+# -------------------+---------------------------+----------------+-----------
+# |-> ADMIN |Project admin | [project]
+# | |level operation |
+# RULE_ADMIN_OR_OWNER|-> PROJECT_MEMBER_OR_ADMIN |Project resource| [project]
+# | |Write |
+# |-> PROJECT_READER_OR_ADMIN |Project resource| [project]
+# | |Read |
# NOTE(johngarbutt) The base rules here affect so many APIs the list
# of related API operations has not been populated. It would be
@@ -92,11 +90,6 @@ rules = [
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'),
policy.RuleDefault(
- "project_admin_api",
- "role:admin and project_id:%(project_id)s",
- "Default rule for Project level admin APIs.",
- deprecated_rule=DEPRECATED_ADMIN_POLICY),
- policy.RuleDefault(
"project_member_api",
"role:member and project_id:%(project_id)s",
"Default rule for Project level non admin APIs.",
@@ -107,9 +100,14 @@ rules = [
"Default rule for Project level read only APIs.",
deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY),
policy.RuleDefault(
+ "project_member_or_admin",
+ "rule:project_member_api or rule:context_is_admin",
+ "Default rule for Project Member or admin APIs.",
+ deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY),
+ policy.RuleDefault(
"project_reader_or_admin",
"rule:project_reader_api or rule:context_is_admin",
- "Default rule for Project reader and admin APIs.",
+ "Default rule for Project reader or admin APIs.",
deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY)
]
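
The two composite rules added here are the core of the patch: project_member_or_admin and project_reader_or_admin OR the existing project rules with context_is_admin (role:admin in Nova's defaults), replacing the removed project_admin_api rule, which required the admin's token to match the target project. A minimal sketch of the resulting semantics using the check strings from this file (the Enforcer wiring is illustrative, not Nova's):

```python
# Minimal sketch of the check strings registered above; the Enforcer
# wiring here is illustrative, not Nova's.
from oslo_config import cfg
from oslo_policy import policy

conf = cfg.ConfigOpts()
enforcer = policy.Enforcer(conf)
enforcer.register_defaults([
    policy.RuleDefault('context_is_admin', 'role:admin'),
    policy.RuleDefault('project_member_api',
                       'role:member and project_id:%(project_id)s'),
    policy.RuleDefault('project_member_or_admin',
                       'rule:project_member_api or rule:context_is_admin'),
])

target = {'project_id': 'p1'}
member_of_p1 = {'roles': ['member'], 'project_id': 'p1'}
admin_of_p2 = {'roles': ['admin'], 'project_id': 'p2'}
member_of_p2 = {'roles': ['member'], 'project_id': 'p2'}

assert enforcer.enforce('project_member_or_admin', target, member_of_p1)
# The admin no longer needs to match the target project:
assert enforcer.enforce('project_member_or_admin', target, admin_of_p2)
assert not enforcer.enforce('project_member_or_admin', target, member_of_p2)
```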
diff --git a/nova/policies/console_auth_tokens.py b/nova/policies/console_auth_tokens.py
index bad3130e78..5f784965cf 100644
--- a/nova/policies/console_auth_tokens.py
+++ b/nova/policies/console_auth_tokens.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-console-auth-tokens'
console_auth_tokens_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show console connection information for a given console "
"authentication token",
operations=[
diff --git a/nova/policies/console_output.py b/nova/policies/console_output.py
index 4a5a21ef55..625971b5d7 100644
--- a/nova/policies/console_output.py
+++ b/nova/policies/console_output.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-console-output'
console_output_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description='Show console output for a server',
operations=[
{
diff --git a/nova/policies/create_backup.py b/nova/policies/create_backup.py
index 173ad3e36f..c18fa11e84 100644
--- a/nova/policies/create_backup.py
+++ b/nova/policies/create_backup.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-create-backup'
create_backup_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description='Create a back up of a server',
operations=[
{
diff --git a/nova/policies/deferred_delete.py b/nova/policies/deferred_delete.py
index a912966897..9c18aa02de 100644
--- a/nova/policies/deferred_delete.py
+++ b/nova/policies/deferred_delete.py
@@ -36,7 +36,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
deferred_delete_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'restore',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Restore a soft deleted server",
operations=[
{
@@ -48,7 +48,7 @@ deferred_delete_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'force',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Force delete a server before deferred cleanup",
operations=[
{
diff --git a/nova/policies/evacuate.py b/nova/policies/evacuate.py
index 4c66f90147..3a0fd502fd 100644
--- a/nova/policies/evacuate.py
+++ b/nova/policies/evacuate.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-evacuate'
evacuate_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Evacuate a server from a failed host to a new host",
operations=[
{
diff --git a/nova/policies/extended_server_attributes.py b/nova/policies/extended_server_attributes.py
index ce5c531a73..ba151a36cc 100644
--- a/nova/policies/extended_server_attributes.py
+++ b/nova/policies/extended_server_attributes.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-extended-server-attributes'
extended_server_attributes_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Return extended attributes for server.
This rule will control the visibility for a set of servers attributes:
diff --git a/nova/policies/extensions.py b/nova/policies/extensions.py
index b049db7a7d..36c3fa0a05 100644
--- a/nova/policies/extensions.py
+++ b/nova/policies/extensions.py
@@ -37,7 +37,7 @@ extensions_policies = [
'path': '/extensions/{alias}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/flavor_access.py b/nova/policies/flavor_access.py
index d86e472c2f..e7044d0cec 100644
--- a/nova/policies/flavor_access.py
+++ b/nova/policies/flavor_access.py
@@ -53,7 +53,7 @@ flavor_access_policies = [
'path': '/flavors/{flavor_id}/action (addTenantAccess)'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'remove_tenant_access',
check_str=base.ADMIN,
@@ -64,7 +64,7 @@ flavor_access_policies = [
'path': '/flavors/{flavor_id}/action (removeTenantAccess)'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.ADMIN,
@@ -79,7 +79,7 @@ to a flavor via an os-flavor-access API.
'path': '/flavors/{flavor_id}/os-flavor-access'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FLAVOR_ACCESS_POLICY),
]
diff --git a/nova/policies/flavor_extra_specs.py b/nova/policies/flavor_extra_specs.py
index 06b486bf49..eaa7dd52cb 100644
--- a/nova/policies/flavor_extra_specs.py
+++ b/nova/policies/flavor_extra_specs.py
@@ -31,7 +31,7 @@ flavor_extra_specs_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
@@ -43,7 +43,7 @@ flavor_extra_specs_policies = [
'method': 'POST'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
@@ -56,7 +56,7 @@ flavor_extra_specs_policies = [
'method': 'PUT'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
@@ -69,7 +69,7 @@ flavor_extra_specs_policies = [
'method': 'DELETE'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
@@ -100,7 +100,7 @@ flavor_extra_specs_policies = [
'method': 'PUT'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/flavor_manage.py b/nova/policies/flavor_manage.py
index b7876e8c96..a2ac6d8b21 100644
--- a/nova/policies/flavor_manage.py
+++ b/nova/policies/flavor_manage.py
@@ -33,7 +33,7 @@ flavor_manage_policies = [
'path': '/flavors'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
check_str=base.ADMIN,
@@ -44,7 +44,7 @@ flavor_manage_policies = [
'path': '/flavors/{flavor_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str=base.ADMIN,
@@ -55,7 +55,7 @@ flavor_manage_policies = [
'path': '/flavors/{flavor_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/floating_ip_pools.py b/nova/policies/floating_ip_pools.py
index 61105efcb7..dd1d8f6851 100644
--- a/nova/policies/floating_ip_pools.py
+++ b/nova/policies/floating_ip_pools.py
@@ -32,7 +32,7 @@ floating_ip_pools_policies = [
'path': '/os-floating-ip-pools'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/floating_ips.py b/nova/policies/floating_ips.py
index 2cb5b34679..48d60d7b89 100644
--- a/nova/policies/floating_ips.py
+++ b/nova/policies/floating_ips.py
@@ -38,7 +38,7 @@ DEPRECATED_FIP_POLICY = policy.DeprecatedRule(
floating_ips_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Associate floating IPs to server. "
" This API is deprecated.",
operations=[
@@ -51,7 +51,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Disassociate floating IPs to server. "
" This API is deprecated.",
operations=[
@@ -64,7 +64,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List floating IPs. This API is deprecated.",
operations=[
{
@@ -76,7 +76,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create floating IPs. This API is deprecated.",
operations=[
{
@@ -88,7 +88,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show floating IPs. This API is deprecated.",
operations=[
{
@@ -100,7 +100,7 @@ floating_ips_policies = [
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete floating IPs. This API is deprecated.",
operations=[
{
diff --git a/nova/policies/hosts.py b/nova/policies/hosts.py
index 1505f225ba..04b91a8641 100644
--- a/nova/policies/hosts.py
+++ b/nova/policies/hosts.py
@@ -48,7 +48,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
@@ -62,7 +62,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'update',
@@ -76,7 +76,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'reboot',
@@ -90,7 +90,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}/reboot'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'shutdown',
@@ -104,7 +104,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}/shutdown'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'start',
@@ -118,7 +118,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}/startup'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/hypervisors.py b/nova/policies/hypervisors.py
index 92bb12e90f..f4f29d1e1b 100644
--- a/nova/policies/hypervisors.py
+++ b/nova/policies/hypervisors.py
@@ -45,7 +45,7 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list-detail',
@@ -57,7 +57,7 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'statistics',
@@ -70,7 +70,7 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
@@ -82,7 +82,7 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'uptime',
@@ -94,7 +94,7 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'search',
@@ -106,7 +106,7 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'servers',
@@ -120,7 +120,7 @@ hypervisors_policies = [
'method': 'GET'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY
),
]
diff --git a/nova/policies/instance_actions.py b/nova/policies/instance_actions.py
index 85e2f63244..e3e16a58f0 100644
--- a/nova/policies/instance_actions.py
+++ b/nova/policies/instance_actions.py
@@ -38,7 +38,7 @@ DEPRECATED_INSTANCE_ACTION_POLICY = policy.DeprecatedRule(
instance_actions_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events:details',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Add "details" key in action events for a server.
This check is performed only after the check
@@ -59,7 +59,7 @@ but in the other hand it might leak information about the deployment
scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Add events details in action details for a server.
This check is performed only after the check
os_compute_api:os-instance-actions:show passes. Beginning with Microversion
@@ -76,7 +76,7 @@ passes, the name of the host.""",
scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List actions for a server.""",
operations=[
{
@@ -88,7 +88,7 @@ passes, the name of the host.""",
deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show action details for a server.""",
operations=[
{
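
The events and events:details rules above are layered checks: as their descriptions say, they are evaluated only after os_compute_api:os-instance-actions:show has already passed, and they only decide whether extra keys appear in the response. A sketch of that pattern; the rule names come from this file, while the view-building function and Forbidden type are assumptions for the sake of the example:

```python
# Illustrative only: the two-stage check described above.
class Forbidden(Exception):
    pass

def build_action_view(enforcer, creds, target, action):
    if not enforcer.enforce('os_compute_api:os-instance-actions:show',
                            target, creds):
        raise Forbidden()
    view = {'action': action['action'], 'request_id': action['request_id']}
    # The admin-only 'events' rule never blocks the base response; it only
    # decides whether the extra key is included.
    if enforcer.enforce('os_compute_api:os-instance-actions:events',
                        target, creds):
        view['events'] = action['events']
    return view
```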
diff --git a/nova/policies/instance_usage_audit_log.py b/nova/policies/instance_usage_audit_log.py
index f93ace08c1..7884134e4a 100644
--- a/nova/policies/instance_usage_audit_log.py
+++ b/nova/policies/instance_usage_audit_log.py
@@ -44,7 +44,7 @@ instance_usage_audit_log_policies = [
'path': '/os-instance_usage_audit_log'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
@@ -59,7 +59,7 @@ instance_usage_audit_log_policies = [
'path': '/os-instance_usage_audit_log/{before_timestamp}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/ips.py b/nova/policies/ips.py
index d63c345389..20cad2522a 100644
--- a/nova/policies/ips.py
+++ b/nova/policies/ips.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:ips:%s'
ips_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show IP addresses details for a network label of a "
" server",
operations=[
@@ -36,7 +36,7 @@ ips_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List IP addresses that are assigned to a server",
operations=[
{
diff --git a/nova/policies/keypairs.py b/nova/policies/keypairs.py
index b0ee4a8906..a42ee6302b 100644
--- a/nova/policies/keypairs.py
+++ b/nova/policies/keypairs.py
@@ -31,7 +31,7 @@ keypairs_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
@@ -42,7 +42,7 @@ keypairs_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
@@ -53,7 +53,7 @@ keypairs_policies = [
'method': 'DELETE'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
@@ -64,7 +64,7 @@ keypairs_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
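
The keypair rules keep their per-user ownership check and only swap the admin half: '(' + base.ADMIN + ') or user_id:%(user_id)s' now expands to (rule:context_is_admin) or user_id:%(user_id)s, so a user manages their own keypairs and any admin can manage anyone's. An illustration of how that expanded string evaluates (the policy name follows Nova's os_compute_api:os-keypairs:%s pattern; target and credentials are invented):

```python
# Illustration only: the expanded keypairs check string, evaluated for the
# owner of a keypair versus another non-admin user.
from oslo_config import cfg
from oslo_policy import policy

conf = cfg.ConfigOpts()
enforcer = policy.Enforcer(conf)
enforcer.register_defaults([
    policy.RuleDefault('context_is_admin', 'role:admin'),
    policy.RuleDefault('os_compute_api:os-keypairs:delete',
                       '(rule:context_is_admin) or user_id:%(user_id)s'),
])

target = {'user_id': 'alice'}
owner = {'roles': ['member'], 'user_id': 'alice', 'project_id': 'p1'}
other = {'roles': ['member'], 'user_id': 'bob', 'project_id': 'p1'}

assert enforcer.enforce('os_compute_api:os-keypairs:delete', target, owner)
assert not enforcer.enforce('os_compute_api:os-keypairs:delete', target, other)
```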
diff --git a/nova/policies/limits.py b/nova/policies/limits.py
index 56bc0e830d..1216dd1995 100644
--- a/nova/policies/limits.py
+++ b/nova/policies/limits.py
@@ -49,7 +49,7 @@ limits_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=OTHER_PROJECT_LIMIT_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Show rate and absolute limits of other project.
This policy only checks if the user has access to the requested
diff --git a/nova/policies/lock_server.py b/nova/policies/lock_server.py
index ca65b1cf9b..f7a018803c 100644
--- a/nova/policies/lock_server.py
+++ b/nova/policies/lock_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-lock-server:%s'
lock_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'lock',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Lock a server",
operations=[
{
@@ -36,7 +36,7 @@ lock_server_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unlock',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unlock a server",
operations=[
{
@@ -48,7 +48,7 @@ lock_server_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unlock:unlock_override',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""Unlock a server, regardless who locked the server.
This check is performed only after the check
diff --git a/nova/policies/migrate_server.py b/nova/policies/migrate_server.py
index d00fd562d2..0b3d7c8bd1 100644
--- a/nova/policies/migrate_server.py
+++ b/nova/policies/migrate_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-migrate-server:%s'
migrate_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'migrate',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Cold migrate a server to a host",
operations=[
{
@@ -35,7 +35,7 @@ migrate_server_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'migrate_live',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Live migrate a server to a new host without a reboot",
operations=[
{
diff --git a/nova/policies/migrations.py b/nova/policies/migrations.py
index 4647d53496..ce2aeaa564 100644
--- a/nova/policies/migrations.py
+++ b/nova/policies/migrations.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-migrations:%s'
migrations_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List migrations",
operations=[
{
diff --git a/nova/policies/multinic.py b/nova/policies/multinic.py
index ff16cb5143..7119ec25b4 100644
--- a/nova/policies/multinic.py
+++ b/nova/policies/multinic.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
multinic_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Add a fixed IP address to a server.
This API is proxy calls to the Network service. This is
@@ -53,7 +53,7 @@ deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Remove a fixed IP address from a server.
This API is proxy calls to the Network service. This is
diff --git a/nova/policies/networks.py b/nova/policies/networks.py
index ab0ce1512b..928705d8be 100644
--- a/nova/policies/networks.py
+++ b/nova/policies/networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List networks for the project.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -52,7 +52,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show network details.
This API is proxy calls to the Network service. This is deprecated.""",
diff --git a/nova/policies/pause_server.py b/nova/policies/pause_server.py
index a7318b16f8..96a1ff4c0d 100644
--- a/nova/policies/pause_server.py
+++ b/nova/policies/pause_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-pause-server:%s'
pause_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'pause',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Pause a server",
operations=[
{
@@ -36,7 +36,7 @@ pause_server_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unpause',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unpause a paused server",
operations=[
{
diff --git a/nova/policies/quota_class_sets.py b/nova/policies/quota_class_sets.py
index e9d22d2f68..b01102b44e 100644
--- a/nova/policies/quota_class_sets.py
+++ b/nova/policies/quota_class_sets.py
@@ -32,7 +32,7 @@ quota_class_sets_policies = [
'path': '/os-quota-class-sets/{quota_class}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
check_str=base.ADMIN,
@@ -43,7 +43,7 @@ quota_class_sets_policies = [
'path': '/os-quota-class-sets/{quota_class}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/quota_sets.py b/nova/policies/quota_sets.py
index 2aa7439390..ae8c471f56 100644
--- a/nova/policies/quota_sets.py
+++ b/nova/policies/quota_sets.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-quota-sets:%s'
quota_sets_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Update the quotas",
operations=[
{
@@ -43,16 +43,10 @@ quota_sets_policies = [
'path': '/os-quota-sets/{tenant_id}/defaults'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- # TODO(gmann): Until we have domain admin or so to get other project's
- # data, allow admin role(with scope check it will be project admin) to
- # get other project quota. We cannot use PROJECT_ADMIN here as
- # project_id passed in request url is used as policy targets which
- # would not match with context's project_id fetched for rule
- # PROJECT_ADMIN check.
- check_str='(' + base.PROJECT_READER + ') or role:admin',
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show a quota",
operations=[
{
@@ -63,7 +57,7 @@ quota_sets_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Revert quotas to defaults",
operations=[
{
@@ -77,7 +71,7 @@ quota_sets_policies = [
# TODO(gmann): Until we have domain admin or so to get other project's
# data, allow admin role(with scope check it will be project admin) to
# get other project quota.
- check_str='(' + base.PROJECT_READER + ') or role:admin',
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the detail of quota",
operations=[
{
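
The show and detail rules above drop the hand-built '(' + base.PROJECT_READER + ') or role:admin' string in favor of the named PROJECT_READER_OR_ADMIN rule. Under Nova's default context_is_admin of role:admin the two grant the same access, so this is a cleanup rather than a behavior change; the named rule additionally follows any context_is_admin override:

```python
# The removed inline string and the new named rule are equivalent under
# Nova's default context_is_admin = 'role:admin'.
old_check = '(rule:project_reader_api) or role:admin'
new_check = 'rule:project_reader_api or rule:context_is_admin'
```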
diff --git a/nova/policies/remote_consoles.py b/nova/policies/remote_consoles.py
index 4b217dc74c..e32dd33d4c 100644
--- a/nova/policies/remote_consoles.py
+++ b/nova/policies/remote_consoles.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-remote-consoles'
remote_consoles_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Generate a URL to access remove server console.
This policy is for ``POST /remote-consoles`` API and below Server actions APIs
diff --git a/nova/policies/rescue.py b/nova/policies/rescue.py
index 040caa4275..f9f72e92ef 100644
--- a/nova/policies/rescue.py
+++ b/nova/policies/rescue.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
rescue_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rescue a server",
operations=[
{
@@ -48,7 +48,7 @@ rescue_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=UNRESCUE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unrescue a server",
operations=[
{
diff --git a/nova/policies/security_groups.py b/nova/policies/security_groups.py
index e5649d5da5..d6318bc724 100644
--- a/nova/policies/security_groups.py
+++ b/nova/policies/security_groups.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
security_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'get',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List security groups. This API is deprecated.",
operations=[
{
@@ -50,7 +50,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show security group. This API is deprecated.",
operations=[
{
@@ -62,7 +62,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create security group. This API is deprecated.",
operations=[
{
@@ -74,7 +74,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update security group. This API is deprecated.",
operations=[
{
@@ -86,7 +86,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete security group. This API is deprecated.",
operations=[
{
@@ -98,7 +98,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'rule:create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create security group Rule. This API is deprecated.",
operations=[
{
@@ -110,7 +110,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'rule:delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete security group Rule. This API is deprecated.",
operations=[
{
@@ -122,7 +122,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List security groups of server.",
operations=[
{
@@ -134,7 +134,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Add security groups to server.",
operations=[
{
@@ -146,7 +146,7 @@ security_groups_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Remove security groups from server.",
operations=[
{
diff --git a/nova/policies/server_diagnostics.py b/nova/policies/server_diagnostics.py
index ebafab4378..6774b7e862 100644
--- a/nova/policies/server_diagnostics.py
+++ b/nova/policies/server_diagnostics.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-server-diagnostics'
server_diagnostics_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show the usage data for a server",
operations=[
{
diff --git a/nova/policies/server_external_events.py b/nova/policies/server_external_events.py
index da832eb94d..56034d0186 100644
--- a/nova/policies/server_external_events.py
+++ b/nova/policies/server_external_events.py
@@ -30,7 +30,7 @@ server_external_events_policies = [
# neutron can call it with user having 'service' role (not having
# server's project_id). That is for phase-2 of RBAC goal and until
# then, we keep it open for all admin in any project. We cannot
- # default it to PROJECT_ADMIN which has the project_id in
+ # default it to ADMIN which has the project_id in
# check_str and will fail if neutron call it with other project_id.
check_str=base.ADMIN,
description="Create one or more external events",
diff --git a/nova/policies/server_groups.py b/nova/policies/server_groups.py
index be1cb62835..8dfbe7c202 100644
--- a/nova/policies/server_groups.py
+++ b/nova/policies/server_groups.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-groups:%s'
server_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a new server group",
operations=[
{
@@ -36,7 +36,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a server group",
operations=[
{
@@ -48,7 +48,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all server groups",
operations=[
{
@@ -60,7 +60,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index:all_projects',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List all server groups for all projects",
operations=[
{
@@ -72,7 +72,7 @@ server_groups_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a server group",
operations=[
{
diff --git a/nova/policies/server_metadata.py b/nova/policies/server_metadata.py
index 1e6b525cb6..f136df8439 100644
--- a/nova/policies/server_metadata.py
+++ b/nova/policies/server_metadata.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:server-metadata:%s'
server_metadata_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all metadata of a server",
operations=[
{
@@ -36,7 +36,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show metadata for a server",
operations=[
{
@@ -48,7 +48,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create metadata for a server",
operations=[
{
@@ -60,7 +60,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_all',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Replace metadata for a server",
operations=[
{
@@ -72,7 +72,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update metadata from a server",
operations=[
{
@@ -84,7 +84,7 @@ server_metadata_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete metadata from a server",
operations=[
{
diff --git a/nova/policies/server_password.py b/nova/policies/server_password.py
index 95fa95830c..1f9ddafd3c 100644
--- a/nova/policies/server_password.py
+++ b/nova/policies/server_password.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
server_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the encrypted administrative "
"password of a server",
operations=[
@@ -50,7 +50,7 @@ server_password_policies = [
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'clear',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Clear the encrypted administrative "
"password of a server",
operations=[
diff --git a/nova/policies/server_tags.py b/nova/policies/server_tags.py
index 014c8d1488..baa1123987 100644
--- a/nova/policies/server_tags.py
+++ b/nova/policies/server_tags.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-tags:%s'
server_tags_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete_all',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete all the server tags",
operations=[
{
@@ -35,7 +35,7 @@ server_tags_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all tags for given server",
operations=[
{
@@ -46,7 +46,7 @@ server_tags_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_all',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Replace all tags on specified server with the new set "
"of tags.",
operations=[
@@ -59,7 +59,7 @@ server_tags_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a single tag from the specified server",
operations=[
{
@@ -71,7 +71,7 @@ server_tags_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Add a single tag to the server if server has no "
"specified tag",
operations=[
@@ -84,7 +84,7 @@ server_tags_policies = [
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Check tag existence on the server.",
operations=[
{
diff --git a/nova/policies/server_topology.py b/nova/policies/server_topology.py
index 7b68e67481..0e6c203e4f 100644
--- a/nova/policies/server_topology.py
+++ b/nova/policies/server_topology.py
@@ -21,7 +21,7 @@ BASE_POLICY_NAME = 'compute:server:topology:%s'
server_topology_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the NUMA topology data for a server",
operations=[
{
@@ -33,7 +33,7 @@ server_topology_policies = [
policy.DocumentedRuleDefault(
# Control host NUMA node and cpu pinning information
name=BASE_POLICY_NAME % 'host:index',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show the NUMA topology data for a server with host "
"NUMA ID and CPU pinning information",
operations=[
diff --git a/nova/policies/servers.py b/nova/policies/servers.py
index faa8f8d02c..1e41baa203 100644
--- a/nova/policies/servers.py
+++ b/nova/policies/servers.py
@@ -36,7 +36,7 @@ not for list extra specs and showing it in flavor API response.
rules = [
policy.DocumentedRuleDefault(
name=SERVERS % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all servers",
operations=[
{
@@ -47,7 +47,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'detail',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all servers with detailed information",
operations=[
{
@@ -58,7 +58,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'index:get_all_tenants',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List all servers for all projects",
operations=[
{
@@ -70,7 +70,7 @@ rules = [
policy.DocumentedRuleDefault(
name=SERVERS % 'detail:get_all_tenants',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List all servers with detailed information for "
" all projects",
operations=[
@@ -82,7 +82,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'allow_all_filters',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Allow all filters when listing servers",
operations=[
{
@@ -97,7 +97,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show a server",
operations=[
{
@@ -108,7 +108,7 @@ rules = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show:flavor-extra-specs',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Starting with microversion 2.47, the flavor and its "
"extra specs used for a server is also returned in the response "
"when showing server details, updating a server or rebuilding a "
@@ -140,7 +140,7 @@ rules = [
# should do that by default.
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Show a server with additional host status information.
@@ -174,7 +174,7 @@ API responses which are also controlled by this policy rule, like the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status:unknown-only',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Show a server with additional host status information, only if host status is
UNKNOWN.
@@ -207,7 +207,7 @@ allow everyone.
scope_types=['project'],),
policy.DocumentedRuleDefault(
name=SERVERS % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server",
operations=[
{
@@ -218,7 +218,7 @@ allow everyone.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:forced_host',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Create a server on the specified host and/or node.
@@ -235,7 +235,7 @@ host and/or node by bypassing the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=REQUESTED_DESTINATION,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Create a server on the requested compute service host and/or
hypervisor_hostname.
@@ -253,7 +253,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_volume',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with the requested volume attached to it",
operations=[
{
@@ -264,7 +264,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_network',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with the requested network attached "
" to it",
operations=[
@@ -276,7 +276,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:trusted_certs',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with trusted image certificate IDs",
operations=[
{
@@ -287,7 +287,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=ZERO_DISK_FLAVOR,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
@@ -312,7 +312,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=NETWORK_ATTACH_EXTERNAL,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Attach an unshared external network to a server",
operations=[
# Create a server with a requested network or port.
@@ -329,7 +329,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a server",
operations=[
{
@@ -340,7 +340,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update a server",
operations=[
{
@@ -351,7 +351,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'confirm_resize',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Confirm a server resize",
operations=[
{
@@ -362,7 +362,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'revert_resize',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Revert a server resize",
operations=[
{
@@ -373,7 +373,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'reboot',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Reboot a server",
operations=[
{
@@ -384,7 +384,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'resize',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Resize a server",
operations=[
{
@@ -410,7 +410,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rebuild a server",
operations=[
{
@@ -421,7 +421,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild:trusted_certs',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rebuild a server with trusted image certificate IDs",
operations=[
{
@@ -432,7 +432,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create an image from a server",
operations=[
{
@@ -443,7 +443,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image:allow_volume_backed',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create an image from a volume backed server",
operations=[
{
@@ -454,7 +454,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'start',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Start a server",
operations=[
{
@@ -465,7 +465,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'stop',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Stop a server",
operations=[
{
@@ -476,7 +476,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'trigger_crash_dump',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Trigger crash dump in a server",
operations=[
{
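
None of the server rules above are hard-coded: each is only a registered default, and deployments can still tighten (or loosen) them through the policy file. A hedged sketch of the same kind of override done in memory with oslo.policy, with an example value rather than a recommendation:

```python
# Hedged sketch: overriding a default in memory; deployments normally do
# the equivalent in policy.yaml. The override value is only an example.
from oslo_config import cfg
from oslo_policy import policy

conf = cfg.ConfigOpts()
enforcer = policy.Enforcer(conf, use_conf=False)
enforcer.set_rules(policy.Rules.from_dict({
    'context_is_admin': 'role:admin',
    # Override: only admins may create servers in this deployment.
    'os_compute_api:servers:create': 'rule:context_is_admin',
}))

target = {'project_id': 'p1'}
member = {'roles': ['member'], 'project_id': 'p1'}
admin = {'roles': ['admin'], 'project_id': 'p1'}

assert not enforcer.enforce('os_compute_api:servers:create', target, member)
assert enforcer.enforce('os_compute_api:servers:create', target, admin)
```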
diff --git a/nova/policies/servers_migrations.py b/nova/policies/servers_migrations.py
index 427da8bba2..21762fc575 100644
--- a/nova/policies/servers_migrations.py
+++ b/nova/policies/servers_migrations.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:servers:migrations:%s'
servers_migrations_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Show details for an in-progress live migration for a "
"given server",
operations=[
@@ -36,7 +36,7 @@ servers_migrations_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'force_complete',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Force an in-progress live migration for a given server "
"to complete",
operations=[
@@ -49,7 +49,7 @@ servers_migrations_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Delete(Abort) an in-progress live migration",
operations=[
{
@@ -60,7 +60,7 @@ servers_migrations_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Lists in-progress live migrations for a given server",
operations=[
{
diff --git a/nova/policies/services.py b/nova/policies/services.py
index 8174bf92df..7300d3bdb3 100644
--- a/nova/policies/services.py
+++ b/nova/policies/services.py
@@ -45,7 +45,7 @@ services_policies = [
'path': '/os-services'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_SERVICE_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'update',
@@ -58,7 +58,7 @@ services_policies = [
'path': '/os-services/{service_id}'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_SERVICE_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'delete',
@@ -70,7 +70,7 @@ services_policies = [
'path': '/os-services/{service_id}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_SERVICE_POLICY),
]
diff --git a/nova/policies/shelve.py b/nova/policies/shelve.py
index eb06ffaa2f..476d212b04 100644
--- a/nova/policies/shelve.py
+++ b/nova/policies/shelve.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-shelve:%s'
shelve_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'shelve',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Shelve server",
operations=[
{
@@ -35,7 +35,7 @@ shelve_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unshelve',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unshelve (restore) shelved server",
operations=[
{
@@ -46,7 +46,7 @@ shelve_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unshelve_to_host',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Unshelve (restore) shelve offloaded server to a "
"specific host",
operations=[
@@ -58,7 +58,7 @@ shelve_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'shelve_offload',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Shelf-offload (remove) server",
operations=[
{
diff --git a/nova/policies/simple_tenant_usage.py b/nova/policies/simple_tenant_usage.py
index d97d5909eb..41d87d1426 100644
--- a/nova/policies/simple_tenant_usage.py
+++ b/nova/policies/simple_tenant_usage.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-simple-tenant-usage:%s'
simple_tenant_usage_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show usage statistics for a specific tenant",
operations=[
{
@@ -35,7 +35,7 @@ simple_tenant_usage_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="List per tenant usage statistics for all tenants",
operations=[
{
diff --git a/nova/policies/suspend_server.py b/nova/policies/suspend_server.py
index 3a603903c8..5e889808fd 100644
--- a/nova/policies/suspend_server.py
+++ b/nova/policies/suspend_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-suspend-server:%s'
suspend_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'resume',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Resume suspended server",
operations=[
{
@@ -35,7 +35,7 @@ suspend_server_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'suspend',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Suspend server",
operations=[
{
diff --git a/nova/policies/tenant_networks.py b/nova/policies/tenant_networks.py
index ee5bd66cdf..79f8d21eaa 100644
--- a/nova/policies/tenant_networks.py
+++ b/nova/policies/tenant_networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
tenant_networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List project networks.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -52,7 +52,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show project network details.
This API is proxy calls to the Network service. This is deprecated.""",
diff --git a/nova/policies/volumes.py b/nova/policies/volumes.py
index 0ee941074d..129ced82c1 100644
--- a/nova/policies/volumes.py
+++ b/nova/policies/volumes.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
volumes_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List volumes.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -52,7 +52,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Create volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -66,7 +66,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'detail',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List volumes detail.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -80,7 +80,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -94,7 +94,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Delete volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -108,7 +108,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:list',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List snapshots.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -122,7 +122,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Create snapshots.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -136,7 +136,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:detail',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List snapshots details.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -150,7 +150,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show snapshot.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -164,7 +164,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Delete snapshot.
This API is a proxy call to the Volume service. It is deprecated.""",
diff --git a/nova/policies/volumes_attachments.py b/nova/policies/volumes_attachments.py
index 20b3a2f3e6..68a1694c59 100644
--- a/nova/policies/volumes_attachments.py
+++ b/nova/policies/volumes_attachments.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-volumes-attachments:%s'
volumes_attachments_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List volume attachments for an instance",
operations=[
{'method': 'GET',
@@ -34,7 +34,7 @@ volumes_attachments_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Attach a volume to an instance",
operations=[
{
@@ -45,7 +45,7 @@ volumes_attachments_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a volume attachment",
operations=[
{
@@ -57,7 +57,7 @@ volumes_attachments_policies = [
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Update a volume attachment.
New 'update' policy about 'swap + update' request (which is possible
only >2.85) only <swap policy> is checked. We expect <swap policy> to be
@@ -78,7 +78,7 @@ always superset of this policy permission.
# can call it with user having 'service' role (not having server's
# project_id). That is for phase-2 of RBAC goal and until then,
# we keep it open for all admin in any project. We cannot default it to
- # PROJECT_ADMIN which has the project_id in check_str and will fail
+ # ADMIN which has the project_id in check_str and will fail
         # if cinder calls it with another project_id.
check_str=base.ADMIN,
description="Update a volume attachment with a different volumeId",
@@ -92,7 +92,7 @@ always superset of this policy permission.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Detach a volume from an instance",
operations=[
{
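
The *_OR_ADMIN aliases used in the new check strings above are defined in nova/policies/base.py; their exact rule strings are not part of this hunk, so the following is only an illustrative sketch (rule names and strings assumed) of what they roughly expand to in oslo.policy terms:

    # Illustrative sketch only -- the real definitions live in
    # nova/policies/base.py and may differ in detail.
    from oslo_policy import policy

    PROJECT_MEMBER_OR_ADMIN = 'rule:project_member_or_admin'
    PROJECT_READER_OR_ADMIN = 'rule:project_reader_or_admin'

    rules = [
        policy.RuleDefault(
            'project_member_or_admin',
            'rule:project_member_api or rule:context_is_admin'),
        policy.RuleDefault(
            'project_reader_or_admin',
            'rule:project_reader_api or rule:context_is_admin'),
    ]
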
diff --git a/nova/policy.py b/nova/policy.py
index 55455a9271..c66489cc8d 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -41,11 +41,15 @@ USER_BASED_RESOURCES = ['os-keypairs']
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(gmann): Remove overriding the default value of config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
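
Because this change turns on scope checks and the new policy defaults out of the box, a deployment that still needs the legacy behaviour can override the oslo.policy options while migrating. A minimal sketch, assuming the standard [oslo_policy] option names (enforce_scope, enforce_new_defaults, policy_file), normally set in nova.conf:

    # Minimal sketch of opting back out while migrating; these are the same
    # oslo.policy options the diff above overrides, just flipped back.
    from oslo_config import cfg
    from oslo_policy import opts

    conf = cfg.ConfigOpts()
    opts.set_defaults(
        conf, 'policy.yaml',
        enforce_scope=False,
        enforce_new_defaults=False)
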
diff --git a/nova/quota.py b/nova/quota.py
index b9dd763012..eafad4cd23 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -1348,11 +1348,8 @@ def _instances_cores_ram_count_legacy(context, project_id, user_id=None):
def _cores_ram_count_placement(context, project_id, user_id=None):
- global PLACEMENT_CLIENT
- if not PLACEMENT_CLIENT:
- PLACEMENT_CLIENT = report.SchedulerReportClient()
- return PLACEMENT_CLIENT.get_usages_counts_for_quota(context, project_id,
- user_id=user_id)
+ return report.report_client_singleton().get_usages_counts_for_quota(
+ context, project_id, user_id=user_id)
def _instances_cores_ram_count_api_db_placement(context, project_id,
diff --git a/nova/rpc.py b/nova/rpc.py
index a32b920e06..7a92650414 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -204,11 +204,9 @@ def get_client(target, version_cap=None, serializer=None,
else:
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(TRANSPORT, target,
+ version_cap=version_cap, serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def get_server(target, endpoints, serializer=None):
@@ -436,9 +434,9 @@ class ClientRouter(periodic_task.PeriodicTasks):
transport = context.mq_connection
if transport:
cmt = self.default_client.call_monitor_timeout
- return messaging.RPCClient(transport, self.target,
- version_cap=self.version_cap,
- serializer=self.serializer,
- call_monitor_timeout=cmt)
+ return messaging.get_rpc_client(transport, self.target,
+ version_cap=self.version_cap,
+ serializer=self.serializer,
+ call_monitor_timeout=cmt)
else:
return self.default_client
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index e4d0c8e3db..1242752be1 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -52,6 +52,7 @@ AGGREGATE_GENERATION_VERSION = '1.19'
NESTED_PROVIDER_API_VERSION = '1.14'
POST_ALLOCATIONS_API_VERSION = '1.13'
GET_USAGES_VERSION = '1.9'
+PLACEMENTCLIENT = None
AggInfo = collections.namedtuple('AggInfo', ['aggregates', 'generation'])
TraitInfo = collections.namedtuple('TraitInfo', ['traits', 'generation'])
@@ -67,6 +68,51 @@ def warn_limit(self, msg):
LOG.warning(msg)
+def report_client_singleton():
+ """Return a reference to the global placement client singleton.
+
+ This initializes the placement client once and returns a reference
+ to that singleton on subsequent calls. Errors are raised
+ (particularly ks_exc.*) but context-specific error messages are
+ logged for consistency.
+ """
+ # NOTE(danms): The report client maintains internal state in the
+ # form of the provider tree, which will be shared across all users
+ # of this global client. That is not a problem now, but in the
+ # future it may be beneficial to fix that. One idea would be to
+ # change the behavior of the client such that the static-config
+ # pieces of the actual keystone client are separate from the
+ # internal state, so that we can return a new object here with a
+ # context-specific local state object, but with the client bits
+ # shared.
+ global PLACEMENTCLIENT
+ if PLACEMENTCLIENT is None:
+ try:
+ PLACEMENTCLIENT = SchedulerReportClient()
+ except ks_exc.EndpointNotFound:
+ LOG.error('The placement API endpoint was not found.')
+ raise
+ except ks_exc.MissingAuthPlugin:
+ LOG.error('No authentication information found for placement API.')
+ raise
+ except ks_exc.Unauthorized:
+ LOG.error('Placement service credentials do not work.')
+ raise
+ except ks_exc.DiscoveryFailure:
+ LOG.error('Discovering suitable URL for placement API failed.')
+ raise
+ except (ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout):
+ LOG.error('Placement API service is not responding.')
+ raise
+ except Exception:
+ LOG.error('Failed to initialize placement client '
+ '(is keystone available?)')
+ raise
+ return PLACEMENTCLIENT
+
+
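
A minimal sketch of how call sites are expected to consume the new singleton (mirroring the quota and scheduler changes elsewhere in this diff); the keystone error handling stays inside report_client_singleton(), so callers remain small:

    # Sketch only: a caller re-using the shared report client. The method
    # called here already appears in the quota change in this diff.
    from nova.scheduler.client import report

    def count_cores_and_ram(context, project_id, user_id=None):
        client = report.report_client_singleton()
        return client.get_usages_counts_for_quota(
            context, project_id, user_id=user_id)
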
def safe_connect(f):
@functools.wraps(f)
def wrapper(self, *a, **k):
@@ -1231,6 +1277,11 @@ class SchedulerReportClient(object):
resp = self.post('/reshaper', payload, version=RESHAPER_VERSION,
global_request_id=context.global_id)
if not resp:
+ if resp.status_code == 409:
+ err = resp.json()['errors'][0]
+ if err['code'] == 'placement.concurrent_update':
+ raise exception.PlacementReshapeConflict(error=resp.text)
+
raise exception.ReshapeFailed(error=resp.text)
return resp
@@ -1264,7 +1315,7 @@ class SchedulerReportClient(object):
# failure here to be fatal to the caller.
try:
self._reshape(context, inventories, allocations)
- except exception.ReshapeFailed:
+ except (exception.ReshapeFailed, exception.PlacementReshapeConflict):
raise
except Exception as e:
# Make sure the original stack trace gets logged.
@@ -1322,7 +1373,6 @@ class SchedulerReportClient(object):
# can inherit.
helper_exceptions = (
exception.InvalidResourceClass,
- exception.InventoryInUse,
exception.ResourceProviderAggregateRetrievalFailed,
exception.ResourceProviderDeletionFailed,
exception.ResourceProviderInUse,
@@ -1341,8 +1391,8 @@ class SchedulerReportClient(object):
# the conflict exception. This signals the resource tracker to
# redrive the update right away rather than waiting until the
# next periodic.
- with excutils.save_and_reraise_exception():
- self._clear_provider_cache_for_tree(rp_uuid)
+ self._clear_provider_cache_for_tree(rp_uuid)
+ raise
except helper_exceptions:
# Invalidate the relevant part of the cache. It gets rebuilt on
# the next pass.
@@ -1383,8 +1433,16 @@ class SchedulerReportClient(object):
if allocations is not None:
# NOTE(efried): We do not catch_all here, because ReshapeFailed
# needs to bubble up right away and be handled specially.
- self._set_up_and_do_reshape(context, old_tree, new_tree,
- allocations)
+ try:
+ self._set_up_and_do_reshape(
+ context, old_tree, new_tree, allocations)
+ except exception.PlacementReshapeConflict:
+ # The conflict means we need to invalidate the local caches and
+                # let the retry mechanism in _update_to_placement re-drive
+                # the reshape on top of the fresh data.
+ with excutils.save_and_reraise_exception():
+ self.clear_provider_cache()
+
# The reshape updated provider generations, so the ones we have in
# the cache are now stale. The inventory update below will short
# out, but we would still bounce with a provider generation
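
The new conflict handling follows an invalidate-and-retry pattern: the 409 from /reshaper becomes PlacementReshapeConflict, the provider cache is cleared, and the caller re-drives the operation on fresh data. A simplified, hypothetical sketch of that control flow (the real retry lives in _update_to_placement):

    # Hypothetical sketch of the invalidate-and-retry flow described above;
    # not the actual nova implementation.
    from nova import exception

    def update_with_retry(client, context, new_tree, allocations, retries=3):
        for attempt in range(retries):
            try:
                return client.update_from_provider_tree(
                    context, new_tree, allocations=allocations)
            except exception.PlacementReshapeConflict:
                # The client already cleared its caches, so re-reading fresh
                # provider data and re-driving the reshape is safe.
                if attempt == retries - 1:
                    raise
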
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 44f283f7ac..785a13279e 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -16,8 +16,12 @@
"""
Scheduler host filters
"""
+from oslo_log import log as logging
+
from nova import filters
+LOG = logging.getLogger(__name__)
+
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
@@ -28,6 +32,9 @@ class BaseHostFilter(filters.BaseFilter):
# other parameters. We care about running policy filters (i.e.
# ImagePropertiesFilter) but not things that check usage on the
# existing compute node, etc.
+    # This also means that filters marked with RUN_ON_REBUILD = True either
+    # cannot filter on allocation candidates or need to handle the rebuild
+    # case specially.
RUN_ON_REBUILD = False
def _filter_one(self, obj, spec):
@@ -50,6 +57,43 @@ class BaseHostFilter(filters.BaseFilter):
raise NotImplementedError()
+class CandidateFilterMixin:
+    """Mixin that helps to implement a Filter that needs to filter hosts by
+    Placement allocation candidates.
+ """
+
+ def filter_candidates(self, host_state, filter_func):
+        """Check the still viable allocation candidates with filter_func and
+        keep only those that pass it.
+
+ :param host_state: HostState object holding the list of still viable
+ allocation candidates
+        :param filter_func: A callable that takes an allocation candidate and
+            returns a truthy value if the candidate passes the filter and a
+            falsy value if it does not.
+ """
+ good_candidates = []
+ for candidate in host_state.allocation_candidates:
+ LOG.debug(
+ f'{self.__class__.__name__} tries allocation candidate: '
+ f'{candidate}',
+ )
+ if filter_func(candidate):
+ LOG.debug(
+ f'{self.__class__.__name__} accepted allocation '
+ f'candidate: {candidate}',
+ )
+ good_candidates.append(candidate)
+ else:
+ LOG.debug(
+ f'{self.__class__.__name__} rejected allocation '
+ f'candidate: {candidate}',
+ )
+
+ host_state.allocation_candidates = good_candidates
+ return good_candidates
+
+
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
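
To illustrate the intended use of the new mixin, here is a hypothetical filter combining it with BaseHostFilter (the class name and predicate are made up for this sketch; the real users are the NUMA topology and PCI passthrough filters below):

    # Hypothetical example, not part of the change: a filter that keeps only
    # the allocation candidates with a non-empty provider mapping.
    class ExampleCandidateFilter(BaseHostFilter, CandidateFilterMixin):

        RUN_ON_REBUILD = False

        def host_passes(self, host_state, spec_obj):
            good_candidates = self.filter_candidates(
                host_state,
                lambda candidate: bool(candidate["mappings"]),
            )
            # The host passes only if at least one candidate survived.
            return bool(good_candidates)
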
diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py
index 74d6012f82..ae50db90e5 100644
--- a/nova/scheduler/filters/numa_topology_filter.py
+++ b/nova/scheduler/filters/numa_topology_filter.py
@@ -20,7 +20,10 @@ from nova.virt import hardware
LOG = logging.getLogger(__name__)
-class NUMATopologyFilter(filters.BaseHostFilter):
+class NUMATopologyFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Filter on requested NUMA topology."""
# NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
@@ -97,12 +100,19 @@ class NUMATopologyFilter(filters.BaseHostFilter):
if network_metadata:
limits.network_metadata = network_metadata
- instance_topology = (hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limits,
- pci_requests=pci_requests,
- pci_stats=host_state.pci_stats))
- if not instance_topology:
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limits,
+ pci_requests=pci_requests,
+ pci_stats=host_state.pci_stats,
+ provider_mapping=candidate["mappings"],
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host)s, %(node)s fails NUMA topology "
"requirements. The instance does not fit on this "
"host.", {'host': host_state.host,
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index f08899586a..992879072a 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -20,7 +20,10 @@ from nova.scheduler import filters
LOG = logging.getLogger(__name__)
-class PciPassthroughFilter(filters.BaseHostFilter):
+class PciPassthroughFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Pci Passthrough Filter based on PCI request
Filter that schedules instances on a host if the host has devices
@@ -47,10 +50,24 @@ class PciPassthroughFilter(filters.BaseHostFilter):
pci_requests = spec_obj.pci_requests
if not pci_requests or not pci_requests.requests:
return True
- if (not host_state.pci_stats or
- not host_state.pci_stats.support_requests(pci_requests.requests)):
+
+ if not host_state.pci_stats:
+ LOG.debug("%(host_state)s doesn't have the required PCI devices"
+ " (%(requests)s)",
+ {'host_state': host_state, 'requests': pci_requests})
+ return False
+
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: host_state.pci_stats.support_requests(
+ pci_requests.requests, provider_mapping=candidate["mappings"]
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host_state)s doesn't have the required PCI devices"
" (%(requests)s)",
{'host_state': host_state, 'requests': pci_requests})
return False
+
return True
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 80511ffad6..8cb775a923 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -153,6 +153,8 @@ class HostState(object):
self.updated = None
+ self.allocation_candidates = []
+
def update(self, compute=None, service=None, aggregates=None,
inst_dict=None):
"""Update all information about a host."""
@@ -296,7 +298,9 @@ class HostState(object):
spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
self.numa_topology, spec_obj.numa_topology,
limits=self.limits.get('numa_topology'),
- pci_requests=pci_requests, pci_stats=self.pci_stats)
+ pci_requests=pci_requests,
+ pci_stats=self.pci_stats,
+ provider_mapping=spec_obj.get_request_group_mapping())
self.numa_topology = hardware.numa_usage_from_instance_numa(
self.numa_topology, spec_obj.numa_topology)
@@ -306,7 +310,11 @@ class HostState(object):
instance_cells = None
if spec_obj.numa_topology:
instance_cells = spec_obj.numa_topology.cells
- self.pci_stats.apply_requests(pci_requests, instance_cells)
+ self.pci_stats.apply_requests(
+ pci_requests,
+ spec_obj.get_request_group_mapping(),
+ instance_cells
+ )
# NOTE(sbauza): By considering all cases when the scheduler is called
# and when consume_from_request() is run, we can safely say that there
@@ -314,13 +322,21 @@ class HostState(object):
self.num_io_ops += 1
def __repr__(self):
- return ("(%(host)s, %(node)s) ram: %(free_ram)sMB "
- "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
- "instances: %(num_instances)s" %
- {'host': self.host, 'node': self.nodename,
- 'free_ram': self.free_ram_mb, 'free_disk': self.free_disk_mb,
- 'num_io_ops': self.num_io_ops,
- 'num_instances': self.num_instances})
+ return (
+ "(%(host)s, %(node)s) ram: %(free_ram)sMB "
+ "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
+ "instances: %(num_instances)s, "
+ "allocation_candidates: %(num_a_c)s"
+ % {
+ "host": self.host,
+ "node": self.nodename,
+ "free_ram": self.free_ram_mb,
+ "free_disk": self.free_disk_mb,
+ "num_io_ops": self.num_io_ops,
+ "num_instances": self.num_instances,
+ "num_a_c": len(self.allocation_candidates),
+ }
+ )
class HostManager(object):
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 03df615f6a..11581c4f2d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -20,6 +20,7 @@ Scheduler Service
"""
import collections
+import copy
import random
from oslo_log import log as logging
@@ -66,7 +67,7 @@ class SchedulerManager(manager.Manager):
self.host_manager = host_manager.HostManager()
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('scheduler')
- self.placement_client = report.SchedulerReportClient()
+ self.placement_client = report.report_client_singleton()
super().__init__(service_name='scheduler', *args, **kwargs)
@@ -299,12 +300,29 @@ class SchedulerManager(manager.Manager):
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
+ def hosts_with_alloc_reqs(hosts_gen):
+ """Extend the HostState objects returned by the generator with
+ the allocation requests of that host
+ """
+ for host in hosts_gen:
+ host.allocation_candidates = copy.deepcopy(
+ alloc_reqs_by_rp_uuid[host.uuid])
+ yield host
+
# Note: remember, we are using a generator-iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(
elevated, spec_obj, provider_summaries)
+        # alloc_reqs_by_rp_uuid is None during rebuild, so this means we
+        # cannot run filters that use allocation candidates during rebuild.
+ if alloc_reqs_by_rp_uuid is not None:
+ # wrap the generator to extend the HostState objects with the
+ # allocation requests for that given host. This is needed to
+ # support scheduler filters filtering on allocation candidates.
+ hosts = hosts_with_alloc_reqs(hosts)
+
# NOTE(sbauza): The RequestSpec.num_instances field contains the number
# of instances created when the RequestSpec was used to first boot some
# instances. This is incorrect when doing a move or resize operation,
@@ -332,6 +350,13 @@ class SchedulerManager(manager.Manager):
# the older dict format representing HostState objects.
# TODO(stephenfin): Remove this when we bump scheduler the RPC API
# version to 5.0
+ # NOTE(gibi): We cannot remove this branch as it is actively used
+ # when nova calls the scheduler during rebuild (not evacuate) to
+ # check if the current host is still good for the new image used
+ # for the rebuild. In this case placement cannot be used to
+ # generate candidates as that would require space on the current
+ # compute for double allocation. So no allocation candidates for
+ # rebuild and therefore alloc_reqs_by_rp_uuid is None
return self._legacy_find_hosts(
context, num_instances, spec_obj, hosts, num_alts,
instance_uuids=instance_uuids)
@@ -345,6 +370,9 @@ class SchedulerManager(manager.Manager):
# The list of hosts that have been selected (and claimed).
claimed_hosts = []
+ # The allocation request allocated on the given claimed host
+ claimed_alloc_reqs = []
+
for num, instance_uuid in enumerate(instance_uuids):
# In a multi-create request, the first request spec from the list
# is passed to the scheduler and that request spec's instance_uuid
@@ -371,21 +399,20 @@ class SchedulerManager(manager.Manager):
# resource provider UUID
claimed_host = None
for host in hosts:
- cn_uuid = host.uuid
- if cn_uuid not in alloc_reqs_by_rp_uuid:
- msg = ("A host state with uuid = '%s' that did not have a "
- "matching allocation_request was encountered while "
- "scheduling. This host was skipped.")
- LOG.debug(msg, cn_uuid)
+ if not host.allocation_candidates:
+ LOG.debug(
+                        "The nova scheduler removed every allocation "
+                        "candidate for host %s so this host was skipped.",
+ host
+ )
continue
- alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid]
# TODO(jaypipes): Loop through all allocation_requests instead
# of just trying the first one. For now, since we'll likely
# want to order the allocation_requests in the future based on
# information in the provider summaries, we'll just try to
# claim resources using the first allocation_request
- alloc_req = alloc_reqs[0]
+ alloc_req = host.allocation_candidates[0]
if utils.claim_resources(
elevated, self.placement_client, spec_obj, instance_uuid,
alloc_req,
@@ -405,6 +432,15 @@ class SchedulerManager(manager.Manager):
claimed_instance_uuids.append(instance_uuid)
claimed_hosts.append(claimed_host)
+ claimed_alloc_reqs.append(alloc_req)
+
+            # Update the provider mapping in the request spec based
+            # on the allocated candidate, as _consume_selected_host depends
+            # on this information to temporarily consume PCI devices tracked
+            # in placement.
+ for request_group in spec_obj.requested_resources:
+ request_group.provider_uuids = alloc_req[
+ 'mappings'][request_group.requester_id]
# Now consume the resources so the filter/weights will change for
# the next instance.
@@ -416,11 +452,19 @@ class SchedulerManager(manager.Manager):
self._ensure_sufficient_hosts(
context, claimed_hosts, num_instances, claimed_instance_uuids)
- # We have selected and claimed hosts for each instance. Now we need to
- # find alternates for each host.
+ # We have selected and claimed hosts for each instance along with a
+ # claimed allocation request. Now we need to find alternates for each
+ # host.
return self._get_alternate_hosts(
- claimed_hosts, spec_obj, hosts, num, num_alts,
- alloc_reqs_by_rp_uuid, allocation_request_version)
+ claimed_hosts,
+ spec_obj,
+ hosts,
+ num,
+ num_alts,
+ alloc_reqs_by_rp_uuid,
+ allocation_request_version,
+ claimed_alloc_reqs,
+ )
def _ensure_sufficient_hosts(
self, context, hosts, required_count, claimed_uuids=None,
@@ -532,7 +576,21 @@ class SchedulerManager(manager.Manager):
def _get_alternate_hosts(
self, selected_hosts, spec_obj, hosts, index, num_alts,
alloc_reqs_by_rp_uuid=None, allocation_request_version=None,
+ selected_alloc_reqs=None,
):
+ """Generate the main Selection and possible alternate Selection
+ objects for each "instance".
+
+ :param selected_hosts: This is a list of HostState objects. Each
+ HostState represents the main selection for a given instance being
+ scheduled (we can have multiple instances during multi create).
+ :param selected_alloc_reqs: This is a list of allocation requests that
+ are already allocated in placement for the main Selection for each
+            instance. This list matches selected_hosts by index. So
+            for the first instance the selected host is selected_hosts[0] and
+ the already allocated placement candidate is
+ selected_alloc_reqs[0].
+ """
# We only need to filter/weigh the hosts again if we're dealing with
# more than one instance and are going to be picking alternates.
if index > 0 and num_alts > 0:
@@ -546,11 +604,10 @@ class SchedulerManager(manager.Manager):
# representing the selected host along with alternates from the same
# cell.
selections_to_return = []
- for selected_host in selected_hosts:
+ for i, selected_host in enumerate(selected_hosts):
# This is the list of hosts for one particular instance.
if alloc_reqs_by_rp_uuid:
- selected_alloc_req = alloc_reqs_by_rp_uuid.get(
- selected_host.uuid)[0]
+ selected_alloc_req = selected_alloc_reqs[i]
else:
selected_alloc_req = None
@@ -571,15 +628,17 @@ class SchedulerManager(manager.Manager):
if len(selected_plus_alts) >= num_alts + 1:
break
+ # TODO(gibi): In theory we could generate alternatives on the
+ # same host if that host has different possible allocation
+ # candidates for the request. But we don't do that today
if host.cell_uuid == cell_uuid and host not in selected_hosts:
if alloc_reqs_by_rp_uuid is not None:
- alt_uuid = host.uuid
- if alt_uuid not in alloc_reqs_by_rp_uuid:
+ if not host.allocation_candidates:
msg = ("A host state with uuid = '%s' that did "
- "not have a matching allocation_request "
+ "not have any remaining allocation_request "
"was encountered while scheduling. This "
"host was skipped.")
- LOG.debug(msg, alt_uuid)
+ LOG.debug(msg, host.uuid)
continue
# TODO(jaypipes): Loop through all allocation_requests
@@ -588,7 +647,13 @@ class SchedulerManager(manager.Manager):
# the future based on information in the provider
# summaries, we'll just try to claim resources using
# the first allocation_request
- alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0]
+ # NOTE(gibi): we are using, and re-using, allocation
+ # candidates for alternatives here. This is OK as
+ # these candidates are not yet allocated in placement
+ # and we don't know if an alternate will ever be used.
+                        # To increase our success rate we could try to use a
+                        # different candidate for each alternative though.
+ alloc_req = host.allocation_candidates[0]
alt_selection = objects.Selection.from_host_state(
host, alloc_req, allocation_request_version)
else:
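
Taken together, the manager changes boil down to a per-host candidate workflow: each HostState receives a deep copy of its allocation candidates, the filters prune that list, and the first surviving candidate is what gets claimed and recorded for the eventual Selection. A condensed sketch of that flow (helper names hypothetical, not the actual code):

    # Condensed, hypothetical sketch of the new claim flow.
    import copy

    def claim_first_fit(hosts_gen, alloc_reqs_by_rp_uuid, try_claim):
        for host in hosts_gen:
            host.allocation_candidates = copy.deepcopy(
                alloc_reqs_by_rp_uuid[host.uuid])
            # (scheduler filters run in between and may shrink the list)
            if not host.allocation_candidates:
                # Every candidate was filtered out; skip this host.
                continue
            alloc_req = host.allocation_candidates[0]
            if try_claim(host, alloc_req):
                return host, alloc_req
        return None, None
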
diff --git a/nova/scheduler/request_filter.py b/nova/scheduler/request_filter.py
index bd237b06ca..bf5c32f372 100644
--- a/nova/scheduler/request_filter.py
+++ b/nova/scheduler/request_filter.py
@@ -24,7 +24,7 @@ from nova.network import neutron
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
-
+from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -214,6 +214,7 @@ def transform_image_metadata(ctxt, request_spec):
'hw_vif_model': 'COMPUTE_NET_VIF_MODEL',
'hw_architecture': 'HW_ARCH',
'hw_emulation_architecture': 'COMPUTE_ARCH',
+ 'hw_viommu_model': 'COMPUTE_VIOMMU',
}
trait_names = []
@@ -311,7 +312,7 @@ def routed_networks_filter(
# Get the clients we need
network_api = neutron.API()
- report_api = report.SchedulerReportClient()
+ report_api = report.report_client_singleton()
for requested_network in requested_networks:
network_id = None
@@ -394,6 +395,41 @@ def remote_managed_ports_filter(
return True
+@trace_request_filter
+def ephemeral_encryption_filter(
+ ctxt: nova_context.RequestContext,
+ request_spec: 'objects.RequestSpec'
+) -> bool:
+    """Pre-filter resource providers by ephemeral encryption support.
+
+ This filter will only retain compute node resource providers that support
+ ephemeral storage encryption when the associated image properties or flavor
+ extra specs are present within the request spec.
+ """
+ # Skip if ephemeral encryption isn't requested in the flavor or image
+ if not hardware.get_ephemeral_encryption_constraint(
+ request_spec.flavor, request_spec.image):
+ LOG.debug("ephemeral_encryption_filter skipped")
+ return False
+
+ # Always add the feature trait regardless of the format being provided
+ request_spec.root_required.add(os_traits.COMPUTE_EPHEMERAL_ENCRYPTION)
+ LOG.debug("ephemeral_encryption_filter added trait "
+ "COMPUTE_EPHEMERAL_ENCRYPTION")
+
+ # Try to find the format in the flavor or image and add as a trait
+ eph_format = hardware.get_ephemeral_encryption_format(
+ request_spec.flavor, request_spec.image)
+ if eph_format:
+ # We don't need to validate the trait here because the earlier call to
+ # get_ephemeral_encryption_format will raise if it is not valid
+ trait_name = f"COMPUTE_EPHEMERAL_ENCRYPTION_{eph_format.upper()}"
+ request_spec.root_required.add(trait_name)
+ LOG.debug(f"ephemeral_encryption_filter added trait {trait_name}")
+
+ return True
+
+
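
For reference, the inputs that trigger this pre-filter and the traits it derives look roughly as follows. The image property names match the Glance fixture additions later in this diff; the flavor extra spec spelling is an assumption here:

    # Sketch of the filter's inputs and outputs; the flavor extra spec keys
    # ('hw:ephemeral_encryption*') are assumed, not confirmed by this diff.
    image_props = {
        'hw_ephemeral_encryption': 'True',
        'hw_ephemeral_encryption_format': 'luks',
    }
    flavor_extra_specs = {
        'hw:ephemeral_encryption': 'True',
        'hw:ephemeral_encryption_format': 'luks',
    }
    # Traits added to request_spec.root_required by the filter:
    required_traits = {
        'COMPUTE_EPHEMERAL_ENCRYPTION',
        'COMPUTE_EPHEMERAL_ENCRYPTION_LUKS',
    }
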
ALL_REQUEST_FILTERS = [
require_tenant_aggregate,
map_az_to_placement_aggregate,
@@ -404,6 +440,7 @@ ALL_REQUEST_FILTERS = [
accelerators_filter,
routed_networks_filter,
remote_managed_ports_filter,
+ ephemeral_encryption_filter,
]
diff --git a/nova/service.py b/nova/service.py
index 2c10224926..bd3b49ae66 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -156,11 +156,11 @@ class Service(service.Service):
LOG.info('Starting %(topic)s node (version %(version)s)',
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
- self.manager.init_host()
- self.model_disconnected = False
ctxt = context.get_admin_context()
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
+ self.manager.init_host(self.service_ref)
+ self.model_disconnected = False
if self.service_ref:
_update_service_ref(self.service_ref)
diff --git a/nova/test.py b/nova/test.py
index e48245c093..0f7965ea33 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -61,6 +61,7 @@ from nova import exception
from nova import objects
from nova.objects import base as objects_base
from nova import quota
+from nova.scheduler.client import report
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
@@ -170,6 +171,12 @@ class TestCase(base.BaseTestCase):
# base class when USES_DB is True.
NUMBER_OF_CELLS = 1
+ # The stable compute id stuff is intentionally singleton-ish, which makes
+ # it a nightmare for testing multiple host/node combinations in tests like
+ # we do. So, mock it out by default, unless the test is specifically
+ # designed to handle it.
+ STUB_COMPUTE_ID = True
+
def setUp(self):
"""Run before each test method to initialize test environment."""
# Ensure BaseTestCase's ConfigureLogging fixture is disabled since
@@ -285,11 +292,24 @@ class TestCase(base.BaseTestCase):
quota.UID_QFD_POPULATED_CACHE_ALL = False
self.useFixture(nova_fixtures.GenericPoisonFixture())
+ self.useFixture(nova_fixtures.SysFsPoisonFixture())
+
+ # Additional module names can be added to this set if needed
+ self.useFixture(nova_fixtures.ImportModulePoisonFixture(
+ set(['guestfs', 'libvirt'])))
# make sure that the wsgi app is fully initialized for all testcase
# instead of only once initialized for test worker
wsgi_app.init_global_data.reset()
+ # Reset the placement client singleton
+ report.PLACEMENTCLIENT = None
+
+ # Reset our local node uuid cache (and avoid writing to the
+ # local filesystem when we generate a new one).
+ if self.STUB_COMPUTE_ID:
+ self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -678,6 +698,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
raise NotImplementedError()
def setUp(self):
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
self.base = self._get_base_class()
super(SubclassSignatureTestCase, self).setUp()
diff --git a/nova/tests/fixtures/__init__.py b/nova/tests/fixtures/__init__.py
index df254608fd..9ff4a2a601 100644
--- a/nova/tests/fixtures/__init__.py
+++ b/nova/tests/fixtures/__init__.py
@@ -16,6 +16,8 @@ from .cast_as_call import CastAsCallFixture # noqa: F401
from .cinder import CinderFixture # noqa: F401
from .conf import ConfFixture # noqa: F401, F403
from .cyborg import CyborgFixture # noqa: F401
+from .filesystem import SysFileSystemFixture # noqa: F401
+from .filesystem import TempFileSystemFixture # noqa: F401
from .glance import GlanceFixture # noqa: F401
from .libvirt import LibvirtFixture # noqa: F401
from .libvirt_imagebackend import LibvirtImageBackendFixture # noqa: F401
diff --git a/nova/tests/fixtures/cinder.py b/nova/tests/fixtures/cinder.py
index 97b32d9b84..025a3d8b81 100644
--- a/nova/tests/fixtures/cinder.py
+++ b/nova/tests/fixtures/cinder.py
@@ -47,6 +47,13 @@ class CinderFixture(fixtures.Fixture):
# This represents a bootable image-backed volume to test
# boot-from-volume scenarios.
IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
+
+    # This represents a bootable image-backed volume to test
+    # boot-from-volume scenarios with the os_require_quiesce and
+    # hw_qemu_guest_agent image properties set.
+ IMAGE_BACKED_VOL_QUIESCE = '6ca404f3-d844-4169-bb96-bc792f37de26'
+
# This represents a bootable image-backed volume with required traits
# as part of volume image metadata
IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'
@@ -157,6 +164,13 @@ class CinderFixture(fixtures.Fixture):
'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
}
+ if volume_id == self.IMAGE_BACKED_VOL_QUIESCE:
+ volume['bootable'] = True
+ volume['volume_image_metadata'] = {
+ "os_require_quiesce": "True",
+ "hw_qemu_guest_agent": "True"
+ }
+
if volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL:
volume['bootable'] = True
volume['volume_image_metadata'] = {
@@ -327,6 +341,16 @@ class CinderFixture(fixtures.Fixture):
_find_attachment(attachment_id)
LOG.info('Completing volume attachment: %s', attachment_id)
+ def fake_reimage_volume(*args, **kwargs):
+ if self.IMAGE_BACKED_VOL not in args:
+ raise exception.VolumeNotFound()
+ if 'reimage_reserved' not in kwargs:
+ raise exception.InvalidInput('reimage_reserved not specified')
+
+ def fake_get_absolute_limits(_self, context):
+ limits = {'totalSnapshotsUsed': 0, 'maxTotalSnapshots': -1}
+ return limits
+
self.test.stub_out(
'nova.volume.cinder.API.attachment_create', fake_attachment_create)
self.test.stub_out(
@@ -366,6 +390,12 @@ class CinderFixture(fixtures.Fixture):
self.test.stub_out(
'nova.volume.cinder.API.terminate_connection',
lambda *args, **kwargs: None)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.reimage_volume',
+ fake_reimage_volume)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.get_absolute_limits',
+ fake_get_absolute_limits)
def volume_ids_for_instance(self, instance_uuid):
for volume_id, attachments in self.volume_to_attachment.items():
diff --git a/nova/tests/fixtures/filesystem.py b/nova/tests/fixtures/filesystem.py
new file mode 100644
index 0000000000..932d42fe27
--- /dev/null
+++ b/nova/tests/fixtures/filesystem.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import fixtures
+
+from nova import filesystem
+from nova.virt.libvirt.cpu import core
+
+
+SYS = 'sys'
+
+
+class TempFileSystemFixture(fixtures.Fixture):
+ """Creates a fake / filesystem"""
+
+ def _setUp(self):
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
+ # NOTE(sbauza): I/O disk errors may raise an exception here, as we
+ # don't ignore them. If that's causing a problem in our CI jobs, the
+ # recommended solution is to use shutil.rmtree instead of cleanup()
+ # with ignore_errors parameter set to True (or wait for the minimum
+ # python version to be 3.10 as TemporaryDirectory will provide
+ # ignore_cleanup_errors parameter)
+ self.addCleanup(self.temp_dir.cleanup)
+
+
+class SysFileSystemFixture(TempFileSystemFixture):
+ """Creates a fake /sys filesystem"""
+
+ def __init__(self, cpus_supported=None):
+ self.cpus_supported = cpus_supported or 10
+
+ def _setUp(self):
+ super()._setUp()
+ self.sys_path = os.path.join(self.temp_dir.name, SYS)
+ self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True)
+
+ sys_patcher = mock.patch(
+ 'nova.filesystem.SYS',
+ new_callable=mock.PropertyMock(return_value=self.sys_path))
+ self.sys_mock = sys_patcher.start()
+ self.addCleanup(sys_patcher.stop)
+
+ avail_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/present')))
+ self.avail_path_mock = avail_path_patcher.start()
+ self.addCleanup(avail_path_patcher.stop)
+
+ cpu_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/cpu%(core)s')))
+ self.cpu_path_mock = cpu_path_patcher.start()
+ self.addCleanup(cpu_path_patcher.stop)
+
+ for cpu_nr in range(self.cpus_supported):
+ cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
+ os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+ filesystem.write_sys(
+ os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+ data='powersave')
+ filesystem.write_sys(core.AVAILABLE_PATH,
+ f'0-{self.cpus_supported - 1}')
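
A minimal sketch of how a test might consume the new fixture (the test class and assertion are hypothetical); the fixture redirects nova.filesystem.SYS and the libvirt cpu paths into a temporary directory, so the governor files can be read like regular files:

    # Hypothetical usage of SysFileSystemFixture; not part of the change.
    import os

    from nova import test
    from nova.tests import fixtures as nova_fixtures

    class TestFakeSysfs(test.NoDBTestCase):
        def test_governor_defaults_to_powersave(self):
            sysfs = self.useFixture(
                nova_fixtures.SysFileSystemFixture(cpus_supported=4))
            governor = os.path.join(
                sysfs.sys_path,
                'devices/system/cpu/cpu0/cpufreq/scaling_governor')
            with open(governor) as f:
                self.assertEqual('powersave', f.read().strip())
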
diff --git a/nova/tests/fixtures/glance.py b/nova/tests/fixtures/glance.py
index cf68f490b4..b718f28c2a 100644
--- a/nova/tests/fixtures/glance.py
+++ b/nova/tests/fixtures/glance.py
@@ -15,6 +15,7 @@ import datetime
import fixtures
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from nova import exception
@@ -198,6 +199,32 @@ class GlanceFixture(fixtures.Fixture):
},
}
+ eph_encryption = copy.deepcopy(image1)
+ eph_encryption['id'] = uuidsentinel.eph_encryption
+ eph_encryption['properties'] = {
+ 'hw_ephemeral_encryption': 'True'
+ }
+
+ eph_encryption_disabled = copy.deepcopy(image1)
+ eph_encryption_disabled['id'] = uuidsentinel.eph_encryption_disabled
+ eph_encryption_disabled['properties'] = {
+ 'hw_ephemeral_encryption': 'False'
+ }
+
+ eph_encryption_luks = copy.deepcopy(image1)
+ eph_encryption_luks['id'] = uuidsentinel.eph_encryption_luks
+ eph_encryption_luks['properties'] = {
+ 'hw_ephemeral_encryption': 'True',
+ 'hw_ephemeral_encryption_format': 'luks'
+ }
+
+ eph_encryption_plain = copy.deepcopy(image1)
+ eph_encryption_plain['id'] = uuidsentinel.eph_encryption_plain
+ eph_encryption_plain['properties'] = {
+ 'hw_ephemeral_encryption': 'True',
+ 'hw_ephemeral_encryption_format': 'plain'
+ }
+
def __init__(self, test):
super().__init__()
self.test = test
@@ -222,6 +249,10 @@ class GlanceFixture(fixtures.Fixture):
self.create(None, self.image5)
self.create(None, self.auto_disk_config_disabled_image)
self.create(None, self.auto_disk_config_enabled_image)
+ self.create(None, self.eph_encryption)
+ self.create(None, self.eph_encryption_disabled)
+ self.create(None, self.eph_encryption_luks)
+ self.create(None, self.eph_encryption_plain)
self._imagedata = {}
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 46b2313cbe..4f48463118 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -31,6 +31,7 @@ from nova.objects import fields as obj_fields
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
+from nova.virt.libvirt import host
# Allow passing None to the various connect methods
@@ -533,7 +534,7 @@ class HostPCIDevicesInfo(object):
"""
self.devices = {}
- if not (num_vfs or num_pfs) and not num_mdevcap:
+ if not (num_vfs or num_pfs or num_pci) and not num_mdevcap:
return
if num_vfs and not num_pfs:
@@ -1433,20 +1434,28 @@ class Domain(object):
'Test attempts to add more than 8 PCI devices. This is '
'not supported by the fake libvirt implementation.')
nic['func'] = func
- # this branch covers most interface types with a source
- # such as linux bridge interfaces.
- if 'source' in nic:
+ if nic['type'] in ('ethernet',):
+ # this branch covers kernel ovs interfaces
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
- <source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>
</interface>''' % nic
- elif nic['type'] in ('ethernet',):
- # this branch covers kernel ovs interfaces
+ elif nic['type'] in ('vdpa',):
+ # this branch covers hardware offloaded ovs with vdpa
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
+ <source dev='%(source)s'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x%(func)s'/>
+ </interface>''' % nic
+ # this branch covers most interface types with a source
+ # such as linux bridge interfaces.
+ elif 'source' in nic:
+ nics += '''<interface type='%(type)s'>
+ <mac address='%(mac)s'/>
+ <source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>
@@ -2035,6 +2044,12 @@ class Connection(object):
return VIR_CPU_COMPARE_IDENTICAL
+ def compareHypervisorCPU(
+ self, emulator, arch, machine, virttype,
+ xml, flags
+ ):
+ return self.compareCPU(xml, flags)
+
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000,
@@ -2220,31 +2235,39 @@ class LibvirtFixture(fixtures.Fixture):
self.useFixture(
fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info'))
- self.useFixture(
- fixtures.MockPatch('nova.compute.utils.get_machine_ips'))
+ self.mock_get_machine_ips = self.useFixture(
+ fixtures.MockPatch('nova.compute.utils.get_machine_ips')).mock
# libvirt driver needs to call out to the filesystem to get the
# parent_ifname for the SRIOV VFs.
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_ifname_by_pci_address',
- return_value='fake_pf_interface_name'))
+ self.mock_get_ifname_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ "nova.pci.utils.get_ifname_by_pci_address",
+ return_value="fake_pf_interface_name",
+ )
+ ).mock
self.useFixture(fixtures.MockPatch(
'nova.pci.utils.get_mac_by_pci_address',
- new=self.fake_get_mac_by_pci_address))
+ side_effect=self.fake_get_mac_by_pci_address))
# libvirt calls out to sysfs to get the vfs ID during macvtap plug
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1))
+ self.mock_get_vf_num_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1
+ )
+ ).mock
# libvirt calls out to privsep to set the mac and vlan of a macvtap
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr_and_vlan'))
+ self.mock_set_device_macaddr_and_vlan = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr_and_vlan')).mock
# libvirt calls out to privsep to set the port state during macvtap
# plug
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr'))
+ self.mock_set_device_macaddr = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr')).mock
# Don't assume that the system running tests has a valid machine-id
self.useFixture(fixtures.MockPatch(
@@ -2259,8 +2282,17 @@ class LibvirtFixture(fixtures.Fixture):
# Ensure tests perform the same on all host architectures
fake_uname = os_uname(
'Linux', '', '5.4.0-0-generic', '', obj_fields.Architecture.X86_64)
- self.useFixture(
- fixtures.MockPatch('os.uname', return_value=fake_uname))
+ self.mock_uname = self.useFixture(
+ fixtures.MockPatch('os.uname', return_value=fake_uname)).mock
+
+ real_exists = os.path.exists
+
+ def fake_exists(path):
+ if path == host.SEV_KERNEL_PARAM_FILE:
+ return False
+ return real_exists(path)
+
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
# ...and on all machine types
fake_loaders = [
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index 3d6f2e81e9..4ce3f03710 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -154,7 +154,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# their construction. Tests can use this to assert that disks were
# created of the expected type.
- def image_init(instance=None, disk_name=None, path=None):
+ def image_init(
+ instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
@@ -169,6 +171,7 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
+ setattr(disk, 'disk_info_mapping', disk_info_mapping)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
@@ -187,6 +190,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
+ # Set the SUPPORTS_LUKS member variable to mimic the Image base
+ # class.
+ image_init.SUPPORTS_LUKS = False
# Ditto for the 'is_shared_block_storage' and
# 'is_file_in_instance_path' functions
@@ -217,16 +223,16 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(
- self, mock_disk, disk_info, cache_mode, extra_specs, disk_unit=None,
+ self, mock_disk, cache_mode, extra_specs, disk_unit=None,
boot_order=None,
):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
- info.source_device = disk_info['type']
- info.target_bus = disk_info['bus']
- info.target_dev = disk_info['dev']
+ info.source_device = mock_disk.disk_info_mapping['type']
+ info.target_bus = mock_disk.disk_info_mapping['bus']
+ info.target_dev = mock_disk.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index b7fcf33e53..9a652c02cb 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -20,8 +20,10 @@ import collections
import contextlib
from contextlib import contextmanager
import functools
+from importlib.abc import MetaPathFinder
import logging as std_logging
import os
+import sys
import time
from unittest import mock
import warnings
@@ -63,6 +65,7 @@ from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
+from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -563,11 +566,10 @@ class CellDatabases(fixtures.Fixture):
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
- return messaging.RPCClient(rpc.TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(rpc.TRANSPORT, target,
+ version_cap=version_cap,
+ serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
@@ -1002,9 +1004,15 @@ class OSAPIFixture(fixtures.Fixture):
self.api = client.TestOpenStackClient(
'fake', base_url, project_id=self.project_id,
roles=['reader', 'member'])
+ self.alternative_api = client.TestOpenStackClient(
+ 'fake', base_url, project_id=self.project_id,
+ roles=['reader', 'member'])
self.admin_api = client.TestOpenStackClient(
'admin', base_url, project_id=self.project_id,
roles=['reader', 'member', 'admin'])
+ self.alternative_admin_api = client.TestOpenStackClient(
+ 'admin', base_url, project_id=self.project_id,
+ roles=['reader', 'member', 'admin'])
self.reader_api = client.TestOpenStackClient(
'reader', base_url, project_id=self.project_id,
roles=['reader'])
@@ -1100,9 +1108,9 @@ class PoisonFunctions(fixtures.Fixture):
# Don't poison the function if it's already mocked
import nova.virt.libvirt.host
if not isinstance(nova.virt.libvirt.host.Host._init_events, mock.Mock):
- self.useFixture(fixtures.MockPatch(
+ self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.Host._init_events',
- side_effect=evloop))
+ evloop))
class IndirectionAPIFixture(fixtures.Fixture):
@@ -1733,3 +1741,155 @@ class ReaderWriterLock(lockutils.ReaderWriterLock):
'threading.current_thread', eventlet.getcurrent)
with mpatch if eventlet_patched else contextlib.ExitStack():
super().__init__(*a, **kw)
+
+
+class SysFsPoisonFixture(fixtures.Fixture):
+
+ def inject_poison(self, module_name, function_name):
+ import importlib
+ mod = importlib.import_module(module_name)
+ orig_f = getattr(mod, function_name)
+ if (
+ isinstance(orig_f, mock.Mock) or
+ # FIXME(gibi): Is this a bug in unittest.mock? If I remove this
+ # then LibvirtReportSevTraitsTests fails as builtins.open is mocked
+ # there at import time via @test.patch_open. That injects a
+ # MagicMock instance to builtins.open which we check here against
+ # Mock (or even MagicMock) via isinstance and that check says it is
+ # not a mock. More interestingly I cannot reproduce the same
+ # issue with @test.patch_open and isinstance in a simple python
+ # interpreter. So to make progress I'm checking the class name
+ # here instead as that works.
+ orig_f.__class__.__name__ == "MagicMock"
+ ):
+ # the target is already mocked, probably via a decorator run at
+ # import time, so we don't need to inject our poison
+ return
+
+ full_name = module_name + "." + function_name
+
+ def toxic_wrapper(*args, **kwargs):
+ path = args[0]
+ if isinstance(path, bytes):
+ pattern = b'/sys'
+ elif isinstance(path, str):
+ pattern = '/sys'
+ else:
+ # we ignore the rest of the potential pathlike types for now
+ pattern = None
+
+ if pattern and path.startswith(pattern):
+ raise Exception(
+ 'This test invokes %s on %s. It is bad, you '
+ 'should mock it.'
+ % (full_name, path)
+ )
+ else:
+ return orig_f(*args, **kwargs)
+
+ self.useFixture(fixtures.MonkeyPatch(full_name, toxic_wrapper))
+
+ def setUp(self):
+ super().setUp()
+ self.inject_poison("os.path", "isdir")
+ self.inject_poison("builtins", "open")
+ self.inject_poison("glob", "iglob")
+ self.inject_poison("os", "listdir")
+ self.inject_poison("glob", "glob")
+        # TODO(gibi): It would be good to poison these too but that makes
+        # a bunch of tests fail.
+ # self.inject_poison("os.path", "exists")
+ # self.inject_poison("os", "stat")
+
+
+class ImportModulePoisonFixture(fixtures.Fixture):
+ """Poison imports of modules unsuitable for the test environment.
+
+ Examples are guestfs and libvirt. Ordinarily, these would not be installed
+ in the test environment but if they _are_ present, it can result in
+ actual calls to libvirt, for example, which could cause tests to fail.
+
+ This fixture will inspect module imports and if they are in the disallowed
+ list, it will fail the test with a helpful message about mocking needed in
+ the test.
+ """
+
+ class ForbiddenModules(MetaPathFinder):
+ def __init__(self, test, modules):
+ super().__init__()
+ self.test = test
+ self.modules = modules
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.modules:
+ current = eventlet.getcurrent()
+                # NOTE(gibi): not every eventlet spawn is under our control, so
+                # there can be senders without test_case_id set; find the first
+ # ancestor that was spawned from nova.utils.spawn[_n] and
+ # therefore has the id set.
+ while (
+ current is not None and
+ not getattr(current, 'test_case_id', None)
+ ):
+ current = current.parent
+
+ if current is not None:
+ self.test.tc_id = current.test_case_id
+ LOG.warning(
+ "!!!---!!! TestCase ID %s hit the import poison while "
+ "importing %s. If you see this in a failed functional "
+ "test then please let #openstack-nova on IRC know "
+ "about it. !!!---!!!", current.test_case_id, fullname)
+ self.test.fail_message = (
+ f"This test imports the '{fullname}' module, which it "
+ f'should not in the test environment. Please add '
+ f'appropriate mocking to this test.'
+ )
+ raise ImportError(fullname)
+
+ def __init__(self, module_names):
+ self.module_names = module_names
+ self.fail_message = ''
+ self.tc_id = None
+ if isinstance(module_names, str):
+ self.module_names = {module_names}
+ self.meta_path_finder = self.ForbiddenModules(self, self.module_names)
+
+ def setUp(self):
+ super().setUp()
+ self.addCleanup(self.cleanup)
+ sys.meta_path.insert(0, self.meta_path_finder)
+
+ def cleanup(self):
+ sys.meta_path.remove(self.meta_path_finder)
+ # We use a flag and check it during the cleanup phase to fail the test
+ # if needed. This is done because some module imports occur inside of a
+ # try-except block that ignores all exceptions, so raising an exception
+ # there (which is also what self.assert* and self.fail() do underneath)
+ # will not work to cause a failure in the test.
+ if self.fail_message:
+ if self.tc_id is not None:
+ LOG.warning(
+ "!!!---!!! TestCase ID %s hit the import poison. If you "
+ "see this in a failed functional test then please let "
+ "#openstack-nova on IRC know about it. !!!---!!!",
+ self.tc_id
+ )
+ raise ImportError(self.fail_message)
+
+
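
The poison is installed for every test by the nova.test.TestCase change above, so tests that exercise libvirt code paths have to rely on mocks rather than the real module; a hypothetical sketch of the usual way to do that:

    # Hypothetical sketch; LibvirtFixture provides a fake libvirt, so the
    # import poison for 'libvirt' is never hit by the code under test.
    from nova import test
    from nova.tests import fixtures as nova_fixtures

    class ExampleLibvirtTest(test.NoDBTestCase):
        def setUp(self):
            super().setUp()
            self.useFixture(nova_fixtures.LibvirtFixture())
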
+class ComputeNodeIdFixture(fixtures.Fixture):
+ def setUp(self):
+ super().setUp()
+
+ node.LOCAL_NODE_UUID = None
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ lambda: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ lambda uuid: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
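
A test that needs the real stable-compute-id behaviour can opt out of the default stubbing added to nova.test.TestCase above; a hypothetical sketch:

    # Hypothetical sketch: opting out of ComputeNodeIdFixture so the real
    # nova.virt.node helpers run (the test then provides its own mocking).
    from nova import test

    class ExampleNodeIdentityTest(test.NoDBTestCase):
        STUB_COMPUTE_ID = False
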
diff --git a/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
index 6b56f72139..d35850baed 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
@@ -217,6 +217,124 @@
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_disabled_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_disabled_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "False"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_luks_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_luks_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "luks"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_plain_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_plain_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "plain"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
}
]
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
index 035cc83695..dc08ba7053 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
@@ -132,6 +132,82 @@
}
],
"name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_disabled_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_disabled_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_luks_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_luks_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_plain_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_plain_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
}
]
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
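A minimal sketch (not part of this change) of how these new request templates are consumed: the api_samples framework fills the %(name)s placeholders with ordinary Python string interpolation before the request body is built and compared, roughly:

    subs = {'adminPass': 'MySecretPass'}
    with open('server-evacuate-req.json.tpl') as f:
        rendered = f.read() % subs
    # rendered now holds the JSON body {"evacuate": {"adminPass": "MySecretPass"}}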
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..486433733d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..3becc83fba
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "hostname": "%(hostname)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
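For reference, the "user_data" value in the rebuild template above is plain base64; a quick standalone check (not part of the change) shows what it carries:

    import base64

    # decode the sample payload used throughout the v2.94 samples
    print(base64.b64decode('ZWNobyAiaGVsbG8gd29ybGQi').decode())
    # prints: echo "hello world"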
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
new file mode 100644
index 0000000000..f83c78fdc9
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "%(user_data)s",
+ "networks": "auto",
+ "hostname": "custom-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
new file mode 100644
index 0000000000..ae2088619a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
new file mode 100644
index 0000000000..bc4be64a8e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "new-server-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
new file mode 100644
index 0000000000..2adc16df5e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..f49d21e7a2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/detail?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..9cdb3aa644
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index 14b7b09cf0..15efb39d44 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -79,7 +79,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -95,7 +96,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -116,7 +118,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -131,7 +134,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -158,7 +162,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -178,7 +183,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -204,8 +210,47 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
pass
+
+
+class EvacuateJsonTestV295(EvacuateJsonTestV268):
+ microversion = '2.95'
+ scenarios = [('v2_95', {'api_major_version': 'v2.1'})]
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
diff --git a/nova/tests/functional/api_sample_tests/test_images.py b/nova/tests/functional/api_sample_tests/test_images.py
index 924bc7768f..c84e566409 100644
--- a/nova/tests/functional/api_sample_tests/test_images.py
+++ b/nova/tests/functional/api_sample_tests/test_images.py
@@ -19,10 +19,29 @@ from nova.tests.functional.api_sample_tests import api_sample_base
class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
sample_dir = 'images'
+ def generalize_subs(self, subs, vanilla_regexes):
+ """Give the test a chance to modify subs after the server response
+ was verified, and before the on-disk doc/api_samples file is checked.
+ """
+ # When comparing the template to the sample we just care that the image
+ # IDs are UUIDs.
+ subs['eph_encryption_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_disabled_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_luks_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_plain_id'] = vanilla_regexes['uuid']
+ return subs
+
def test_images_list(self):
# Get api sample of images get list request.
response = self._do_get('images')
- self._verify_response('images-list-get-resp', {}, response, 200)
+ subs = {
+ 'eph_encryption_id': self.glance.eph_encryption['id'],
+ 'eph_encryption_disabled_id':
+ self.glance.eph_encryption_disabled['id'],
+ 'eph_encryption_luks_id': self.glance.eph_encryption_luks['id'],
+ 'eph_encryption_plain_id': self.glance.eph_encryption_plain['id'],
+ }
+ self._verify_response('images-list-get-resp', subs, response, 200)
def test_image_get(self):
# Get api sample of one single image details request.
@@ -34,7 +53,14 @@ class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
def test_images_details(self):
# Get api sample of all images details request.
response = self._do_get('images/detail')
- self._verify_response('images-details-get-resp', {}, response, 200)
+ subs = {
+ 'eph_encryption_id': self.glance.eph_encryption['id'],
+ 'eph_encryption_disabled_id':
+ self.glance.eph_encryption_disabled['id'],
+ 'eph_encryption_luks_id': self.glance.eph_encryption_luks['id'],
+ 'eph_encryption_plain_id': self.glance.eph_encryption_plain['id'],
+ }
+ self._verify_response('images-details-get-resp', subs, response, 200)
def test_image_metadata_get(self):
# Get api sample of an image metadata request.
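As the comment in generalize_subs() above says, only the shape of the new image IDs matters when the response is compared against the committed sample, so the concrete IDs are swapped for a UUID regex. A rough standalone illustration (sample values assumed, not taken from the change):

    import re

    uuid_re = '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
    sample_line = '"id": "155d900f-4e14-4e4c-a73d-069cbf4541e6"'
    # any well-formed UUID in the on-disk sample satisfies the generalized sub
    assert re.search('"id": "%s"' % uuid_re, sample_line)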
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index aa07b88247..7679c9b734 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -618,6 +618,13 @@ class ServersSampleJson290Test(ServersSampleJsonTest):
ADMIN_API = False
+class ServersSampleJson294Test(ServersSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ use_common_server_post = False
+ ADMIN_API = False
+
+
class ServersUpdateSampleJsonTest(ServersSampleBase):
# Many of the 'os_compute_api:servers:*' policies are admin-only, and we
@@ -702,6 +709,44 @@ class ServersUpdateSampleJson290Test(ServersUpdateSampleJsonTest):
self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+class ServersUpdateSampleJson294Test(ServersUpdateSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ ADMIN_API = False
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ subs['hostname'] = 'updated-hostname.example.com'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ params = {
+ 'uuid': self.glance.auto_disk_config_enabled_image['id'],
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ 'hostname': 'updated-hostname.example.com',
+ }
+
+ resp = self._do_post(
+ 'servers/%s/action' % uuid,
+ 'server-action-rebuild',
+ params,
+ )
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index 24d72f56ee..139fb5e6ac 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -29,7 +29,6 @@ from nova import conf
from nova import context
from nova import objects
from nova import test
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.virt import driver as virt_driver
@@ -249,6 +248,7 @@ class IronicResourceTrackerTest(test.TestCase):
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
+ 'uuid': str(getattr(uuids, nodename)),
}
self.rt.update_available_resource(self.ctx, nodename)
@@ -694,15 +694,6 @@ class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase):
feature a vm cannot be spawned using a custom trait and then start a
compute service that provides that trait.
"""
-
- self.useFixture(nova_fixtures.NeutronFixture(self))
- self.useFixture(nova_fixtures.GlanceFixture(self))
-
- # Start nova services.
- self.api = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1')).admin_api
- self.api.microversion = 'latest'
- self.start_service('conductor')
# start nova-compute that will not have the additional trait.
self._start_compute("fake-host-1")
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 028ef53d7e..cdf71da0d4 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -247,6 +247,27 @@ class InstanceHelperMixin:
self.assertIn(error_in_tb, event['traceback'])
return event
+ def _assert_build_request_success(self, server_request):
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server['id']
+
+ def _assert_build_request_schedule_failure(self, server_request):
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ERROR')
+
+ def _assert_bad_build_request_error(self, server_request):
+ ex = self.assertRaises(
+ api_client.OpenStackApiException, self.api.post_server,
+ {'server': server_request})
+ self.assertEqual(400, ex.response.status_code)
+
+ def _assert_build_request_error(self, server_request):
+ ex = self.assertRaises(
+ api_client.OpenStackApiException, self.api.post_server,
+ {'server': server_request})
+ self.assertEqual(500, ex.response.status_code)
+
def _wait_for_migration_status(self, server, expected_statuses):
"""Waits for a migration record with the given statuses to be found
for the given server, else the test fails. The migration record, if
@@ -540,8 +561,8 @@ class InstanceHelperMixin:
self.api.post_server_action(
server['id'],
{'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
- self._wait_for_state_change(server, server_expected_state)
self._wait_for_migration_status(server, [migration_expected_state])
+ return self._wait_for_state_change(server, server_expected_state)
_live_migrate_server = _live_migrate
@@ -577,7 +598,7 @@ class InstanceHelperMixin:
def _evacuate_server(
self, server, extra_post_args=None, expected_host=None,
- expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_state='SHUTOFF', expected_task_state=NOT_SPECIFIED,
expected_migration_status='done'):
"""Evacuate a server."""
api = getattr(self, 'admin_api', self.api)
@@ -612,6 +633,13 @@ class InstanceHelperMixin:
return self._wait_for_state_change(server, 'SHUTOFF')
return server
+ def _snapshot_server(self, server, snapshot_name):
+ """Create server snapshot."""
+ self.api.post_server_action(
+ server['id'],
+ {'createImage': {'name': snapshot_name}}
+ )
+
class PlacementHelperMixin:
"""A helper mixin for interacting with placement."""
@@ -631,12 +659,16 @@ class PlacementHelperMixin:
'/resource_providers', version='1.14'
).body['resource_providers']
- def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
+ def _get_all_rps_in_a_tree(self, in_tree_rp_uuid):
rps = self.placement.get(
'/resource_providers?in_tree=%s' % in_tree_rp_uuid,
version='1.20',
).body['resource_providers']
- return [rp['uuid'] for rp in rps]
+ return rps
+
+ def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
+ return [
+ rp['uuid'] for rp in self._get_all_rps_in_a_tree(in_tree_rp_uuid)]
def _post_resource_provider(self, rp_name):
return self.placement.post(
@@ -844,6 +876,20 @@ class PlacementHelperMixin:
'Test expected a single migration but found %i' % len(migrations))
return migrations[0].uuid
+ def _reserve_placement_resource(self, rp_name, rc_name, reserved):
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ inv = self.placement.get(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26'
+ ).body
+ inv["reserved"] = reserved
+ result = self.placement.put(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26', body=inv
+ ).body
+ self.assertEqual(reserved, result["reserved"])
+ return result
+
class PlacementInstanceHelperMixin(InstanceHelperMixin, PlacementHelperMixin):
"""A placement-aware variant of InstanceHelperMixin."""
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index efae4369e4..7b6ee10631 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -18,6 +18,7 @@ import io
from unittest import mock
import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova.tests import fixtures as nova_fixtures
@@ -51,12 +52,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128, 'used': 44, 'free': 84}))
- self.useFixture(fixtures.MockPatch(
+ self.mock_is_valid_hostname = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True))
- self.useFixture(fixtures.MockPatch(
+ return_value=True)).mock
+ self.mock_file_open = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=lambda *a, **k: io.BytesIO(b'')))
+ side_effect=lambda *a, **k: io.BytesIO(b''))).mock
self.useFixture(fixtures.MockPatch(
'nova.privsep.utils.supports_direct_io',
return_value=True))
@@ -114,7 +115,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
def start_compute(
self, hostname='compute1', host_info=None, pci_info=None,
mdev_info=None, vdpa_info=None, libvirt_version=None,
- qemu_version=None,
+ qemu_version=None, cell_name=None, connection=None
):
"""Start a compute service.
@@ -124,16 +125,35 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
:param host_info: A fakelibvirt.HostInfo object for the host. Defaults
to a HostInfo with 2 NUMA nodes, 2 cores per node, 2 threads per
core, and 16GB of RAM.
+ :param connection: A fake libvirt connection. You should not provide it
+ directly. However it is used by restart_compute_service to
+ implement a restart without losing the hypervisor state.
:returns: The hostname of the created service, which can be used to
look up the created service and UUID of the associated resource
provider.
"""
+ if connection and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either an existing connection instance can be provided or a "
+ "list of parameters for a new connection"
+ )
def _start_compute(hostname, host_info):
- fake_connection = self._get_connection(
- host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
- qemu_version, hostname,
- )
+ if connection:
+ fake_connection = connection
+ else:
+ fake_connection = self._get_connection(
+ host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
+ qemu_version, hostname,
+ )
+
# If the compute is configured with PCI devices then we need to
# make sure that the stubs around sysfs has the MAC address
# information for the PCI PF devices
@@ -144,7 +164,8 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
# actually start the service.
orig_con = self.mock_conn.return_value
self.mock_conn.return_value = fake_connection
- compute = self.start_service('compute', host=hostname)
+ compute = self.start_service(
+ 'compute', host=hostname, cell_name=cell_name)
# Once that's done, we need to tweak the compute "service" to
# make sure it returns unique objects.
compute.driver._host.get_connection = lambda: fake_connection
@@ -157,7 +178,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
- self.computes[hostname] = _start_compute(hostname, host_info)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ self.computes[hostname] = _start_compute(hostname, host_info)
+ # We need to trigger libvirt.Host() to capture the node-local
+ # uuid while we have it mocked out.
+ self.computes[hostname].driver._host.get_node_uuid()
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
@@ -165,6 +191,74 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
return hostname
+ def restart_compute_service(
+ self,
+ hostname,
+ host_info=None,
+ pci_info=None,
+ mdev_info=None,
+ vdpa_info=None,
+ libvirt_version=None,
+ qemu_version=None,
+ keep_hypervisor_state=True,
+ ):
+ """Stops the service and starts a new one to have realistic restart
+
+ :param hostname: the hostname of the nova-compute service to be
+ restarted
+ :param keep_hypervisor_state: If True, we reuse the fake connection
+ from the existing driver. If False, a new connection will be created
+ based on the other parameters provided.
+ """
+ # We are intentionally not calling super() here. Nova's base test class
+ # defines starting and restarting the compute service with very
+ # different signatures, and those calls cannot be made aware of
+ # the intricacies of the libvirt fixture. So we simply hide that
+ # implementation.
+
+ if keep_hypervisor_state and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either keep_hypervisor_state=True or a list of libvirt "
+ "parameters can be provided but not both"
+ )
+
+ compute = self.computes.pop(hostname)
+ self.compute_rp_uuids.pop(hostname)
+
+ # NOTE(gibi): The service interface cannot be used to simulate a real
+ # service restart as the manager object will not be recreated after a
+ # service.stop() and service.start(), so the manager state will
+ # survive. For example the resource tracker will not be recreated after
+ # a stop/start. The service.kill() call does not help either as it
+ # deletes the service from the DB, which is unrealistic and causes some
+ # operations that refer to the killed host (e.g. evacuate) to fail.
+ # So this helper method stops the original service and then starts
+ # a brand new compute service for the same host and node. This way
+ # a new ComputeManager instance will be created and initialized during
+ # the service startup.
+ compute.stop()
+
+ # this service was running previously, so we have to make sure that
+ # we restart it in the same cell
+ cell_name = self.host_mappings[compute.host].cell_mapping.name
+
+ old_connection = compute.manager.driver._get_connection()
+
+ self.start_compute(
+ hostname, host_info, pci_info, mdev_info, vdpa_info,
+ libvirt_version, qemu_version, cell_name,
+ old_connection if keep_hypervisor_state else None
+ )
+
+ return self.computes[hostname]
+
class LibvirtMigrationMixin(object):
"""A simple mixin to facilliate successful libvirt live migrations
diff --git a/nova/tests/functional/libvirt/test_device_bus_migration.py b/nova/tests/functional/libvirt/test_device_bus_migration.py
index 82a0d4556e..3852e31c68 100644
--- a/nova/tests/functional/libvirt/test_device_bus_migration.py
+++ b/nova/tests/functional/libvirt/test_device_bus_migration.py
@@ -51,7 +51,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
def _assert_stashed_image_properties_persist(self, server, properties):
# Assert the stashed properties persist across a host reboot
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
self._assert_stashed_image_properties(server['id'], properties)
# Assert the stashed properties persist across a guest reboot
@@ -173,7 +173,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
self.flags(pointer_model='ps2mouse')
# Restart compute to pick up ps2 setting, which means the guest will
# not get a prescribed pointer device
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
# Create a server with default image properties
default_image_properties1 = {
@@ -187,7 +187,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
# Assert the defaults persist across a host flag change
self.flags(pointer_model='usbtablet')
# Restart compute to pick up usb setting
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
self._assert_stashed_image_properties(
server1['id'], default_image_properties1)
@@ -216,7 +216,7 @@ class LibvirtDeviceBusMigration(base.ServersTestBase):
# https://bugs.launchpad.net/nova/+bug/1866106
self.flags(pointer_model=None)
# Restart compute to pick up None setting
- self.restart_compute_service(self.compute)
+ self.restart_compute_service(self.compute_hostname)
self._assert_stashed_image_properties(
server1['id'], default_image_properties1)
self._assert_stashed_image_properties(
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 9d3deec99d..92d7ffba29 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -415,7 +415,9 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
with mock.patch.object(fakelibvirt.Connection, 'getHostname',
return_value=name):
- compute = self.start_service('compute', host=name)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % name))
+ compute = self.start_service('compute', host=name)
compute.driver._host.get_connection().getHostname = lambda: name
return compute
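The node UUID mocking above relies on oslo's uuidsentinel returning a stable value per attribute name, so every fake compute gets a deterministic, unique node UUID. A standalone illustration:

    from oslo_utils.fixture import uuidsentinel as uuids

    # the same attribute name always yields the same UUID string ...
    assert uuids.node_compute1 == getattr(uuids, 'node_%s' % 'compute1')
    # ... while different attribute names yield different UUIDs
    assert uuids.node_compute1 != uuids.node_compute2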
diff --git a/nova/tests/functional/libvirt/test_numa_live_migration.py b/nova/tests/functional/libvirt/test_numa_live_migration.py
index 2f3897d6b2..0e504d2df2 100644
--- a/nova/tests/functional/libvirt/test_numa_live_migration.py
+++ b/nova/tests/functional/libvirt/test_numa_live_migration.py
@@ -206,10 +206,8 @@ class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked
if pin_dest:
@@ -333,10 +331,8 @@ class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked. This is a
# rollback test, so server_a is expected to remain on host_a.
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index e4c2b9317d..bb428159ad 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -1187,10 +1187,8 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
self.flags(cpu_dedicated_set='0-7', group='compute')
self.flags(vcpu_pin_set=None)
- computes = {}
- for host, compute in self.computes.items():
- computes[host] = self.restart_compute_service(compute)
- self.computes = computes
+ for host in list(self.computes.keys()):
+ self.restart_compute_service(host)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_pci_in_placement.py b/nova/tests/functional/libvirt/test_pci_in_placement.py
new file mode 100644
index 0000000000..41d6c8e008
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_pci_in_placement.py
@@ -0,0 +1,1997 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+import ddt
+import fixtures
+import os_resource_classes
+import os_traits
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from nova import exception
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_pci_sriov_servers
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class PlacementPCIReportingTests(test_pci_sriov_servers._PCIServersTestBase):
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+ PF_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PF_PROD_ID}"
+ VF_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.VF_PROD_ID}"
+
+ # Just placeholders to satisfy the base class. The real value will be
+ # redefined by the tests
+ PCI_DEVICE_SPEC = []
+ PCI_ALIAS = [
+ jsonutils.dumps(x)
+ for x in (
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-pci-dev",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "device_type": "type-PF",
+ "name": "a-pf",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "device_type": "type-VF",
+ "name": "a-vf",
+ },
+ )
+ ]
+
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+
+ # These tests should not depend on the host's sysfs
+ self.useFixture(
+ fixtures.MockPatch('nova.pci.utils.is_physical_function'))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False)
+ )
+ )
+
+
+class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
+
+ def test_new_compute_init_with_pci_devs(self):
+ """A brand new compute is started with multiple pci devices configured
+ for nova.
+ """
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with two type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=4)
+
+ # the emulated devices will then be filtered by the device_spec:
+ device_spec = self._to_list_of_json_str(
+ [
+ # PCI_PROD_ID will match two type-PCI devs (slot 0, 1)
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "traits": ",".join(
+ [os_traits.HW_GPU_API_VULKAN, "CUSTOM_GPU", "purple"]
+ )
+ },
+ # PF_PROD_ID + slot 2 will match one PF but not its children
+ # VFs
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "CUSTOM_PF", "pf-white"]
+ ),
+ },
+ # VF_PROD_ID + slot 3 will match two VFs but not their parent
+ # PF
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "CUSTOM_VF", "vf-red"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ # Finally we assert that only the filtered devices are reported to
+ # placement.
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ # Note that the VF inventory is reported on the parent PF
+ "0000:81:03.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_GPU",
+ "CUSTOM_PURPLE",
+ ],
+ "0000:81:01.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_GPU",
+ "CUSTOM_PURPLE",
+ ],
+ "0000:81:02.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF",
+ "CUSTOM_PF_WHITE",
+ ],
+ "0000:81:03.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF",
+ "CUSTOM_VF_RED",
+ ],
+ },
+ )
+
+ def test_new_compute_init_with_pci_dev_custom_rc(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI dev in slot 0
+ # * one type-PF dev in slot 1 with a single type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=1)
+
+ device_spec = self._to_list_of_json_str(
+ [
+ # PCI_PROD_ID will match the type-PCI in slot 0
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "resource_class": os_resource_classes.PGPU,
+ "traits": os_traits.HW_GPU_API_VULKAN,
+ },
+ # slot 1 func 0 is the type-PF dev. The child VF is ignored
+ {
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:01.0",
+ "resource_class": "crypto",
+ "traits": "to-the-moon,hodl"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {os_resource_classes.PGPU: 1},
+ "0000:81:01.0": {"CUSTOM_CRYPTO": 1},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_TO_THE_MOON",
+ "CUSTOM_HODL",
+ ],
+ },
+ )
+
+ def test_dependent_device_config_is_rejected(self):
+ """Configuring both the PF and its children VFs is not supported.
+ Only one of them can be given to nova.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with a single type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # both devices will be matched by our config
+ device_spec = self._to_list_of_json_str(
+ [
+ # PF
+ {
+ "address": "0000:81:00.0"
+ },
+ # Its child VF
+ {
+ "address": "0000:81:00.1"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported",
+ str(ex)
+ )
+
+ def test_sibling_vfs_with_contradicting_resource_classes_rejected(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with two type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches the two VFs separately and tries to configure
+ # them with different resource classes
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.1",
+ "resource_class": "vf1"
+ },
+ {
+ "address": "0000:81:00.2",
+ "resource_class": "vf2"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciMixedResourceClassException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "VFs from the same PF cannot be configured with different "
+ "'resource_class' values in [pci]device_spec. We got "
+ "CUSTOM_VF2 for 0000:81:00.2 and CUSTOM_VF1 for 0000:81:00.1.",
+ str(ex)
+ )
+
+ def test_sibling_vfs_with_contradicting_traits_rejected(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with two type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches the two VFs separately and tries to configure
+ # them with different trait lists
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.1",
+ "traits": "foo",
+ },
+ {
+ "address": "0000:81:00.2",
+ "traits": "bar",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR for 0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_FOO for 0000:81:00.1.",
+ str(ex)
+ )
+
+ def test_neutron_sriov_devs_ignored(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with one type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # then the config assigns physnet to the dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "physical_network": "physnet0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ # As every matching dev has a physnet configured, they are all ignored
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_devname_based_dev_spec_rejected(self):
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "devname": "eth0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.start_compute,
+ hostname="compute1",
+ )
+ self.assertIn(
+ " Invalid [pci]device_spec configuration. PCI Placement reporting "
+ "does not support 'devname' based device specification but we got "
+ "{'devname': 'eth0'}. Please use PCI address in the configuration "
+ "instead.",
+ str(ex)
+ )
+
+ def test_remove_pci(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches that PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # now un-configure the PCI device and restart the compute
+ self.flags(group='pci', device_spec=self._to_list_of_json_str([]))
+ self.restart_compute_service(hostname="compute1")
+
+ # the RP had no allocation so nova could remove it
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_remove_one_vf(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config matching the VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove one of the VFs from the hypervisor and then restart the
+ # compute
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # total value is expected to decrease to 1
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_remove_all_vfs(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config matches the VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove both VFs from the hypervisor and restart the compute
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=0)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that the RP is deleted
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_remove_all_vfs_add_pf(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config matches both VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # change the config to match the PF but do not match the VFs and
+ # restart the compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that VF inventory is removed and the PF inventory is added
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_remove_pf_add_vfs(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config only matches the PF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove the PF from the config and add the VFs instead then restart
+ # the compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that PF inventory is removed and the VF inventory is added
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_device_reconfiguration(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with two type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # from slot 0 we match the PF only and ignore the VFs
+ # from slot 1 we match the VFs but ignore the parent PF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "CUSTOM_PF", "pf-white"]
+ ),
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:01.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "CUSTOM_VF", "vf-red"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ "0000:81:01.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF",
+ "CUSTOM_PF_WHITE",
+ ],
+ "0000:81:01.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF",
+ "CUSTOM_VF_RED",
+ ],
+ },
+ )
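+ # Note: trait and resource_class names in the device_spec that are not
+ # standard os-traits / os-resource-classes (e.g. "pf-white" and
+ # "vf-red" above) are normalized by nova to upper-cased, CUSTOM_
+ # prefixed names (CUSTOM_PF_WHITE, CUSTOM_VF_RED) before being
+ # reported to Placement, which is what the expected view asserts.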
+
+ # change the resource class and traits configuration and restart the
+ # compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "resource_class": "CUSTOM_PF",
+ "address": "0000:81:00.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "pf-black"]
+ ),
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "CUSTOM_VF",
+ "address": "0000:81:01.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "vf-blue", "foobar"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {"CUSTOM_PF": 1},
+ "0000:81:01.0": {"CUSTOM_VF": 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF_BLACK",
+ ],
+ "0000:81:01.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF_BLUE",
+ "CUSTOM_FOOBAR",
+ ],
+ },
+ )
+
+ def _create_one_compute_with_a_pf_consumed_by_an_instance(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with one type-VF
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # we match the PF only and ignore the VF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:00.0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming the PF
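+ # The "a-pf" alias used below (and "a-vf" / "a-pci-dev" elsewhere in
+ # these tests) is assumed to be defined in [pci]alias by the test base
+ # class rather than in this hunk; an illustrative shape would be
+ # {"product_id": fakelibvirt.PF_PROD_ID, "device_type": "type-PF",
+ # "name": "a-pf"}.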
+ extra_spec = {"pci_passthrough:alias": "a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {self.PF_RC: 1},
+ }
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.PF_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ return server, compute1_expected_placement_view
+
+ def test_device_reconfiguration_with_allocations_config_change_warn(self):
+ server, compute1_expected_placement_view = (
+ self._create_one_compute_with_a_pf_consumed_by_an_instance())
+
+ # remove 0000:81:00.0 from the device spec and restart the compute
+ device_spec = self._to_list_of_json_str([])
+ self.flags(group='pci', device_spec=device_spec)
+ # The PF is in use but has been removed from the config. The
+ # PciTracker warns but keeps the device, so the placement logic mimics
+ # this and only warns while keeping the RP and the allocation in
+ # placement intact.
+ self.restart_compute_service(hostname="compute1")
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # the warning from the PciTracker
+ self.assertIn(
+ "WARNING [nova.pci.manager] Unable to remove device with status "
+ "'allocated' and ownership %s because of PCI device "
+ "1:0000:81:00.0 is allocated instead of ['available', "
+ "'unavailable', 'unclaimable']. Check your [pci]device_spec "
+ "configuration to make sure this allocated device is whitelisted. "
+ "If you have removed the device from the whitelist intentionally "
+ "or the device is no longer available on the host you will need "
+ "to delete the server or migrate it to another host to silence "
+ "this warning."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+ # the warning from the placement PCI tracking logic
+ self.assertIn(
+ "WARNING [nova.compute.pci_placement_translator] Device spec is "
+ "not found for device 0000:81:00.0 in [pci]device_spec. We are "
+ "skipping this devices during Placement update. The device is "
+ "allocated by %s. You should not remove an allocated device from "
+ "the configuration. Please restore the configuration or cold "
+ "migrate the instance to resolve the inconsistency."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+
+ def test_device_reconfiguration_with_allocations_config_change_stop(self):
+ self._create_one_compute_with_a_pf_consumed_by_an_instance()
+
+ # switch 0000:81:00.0 PF to 0000:81:00.1 VF
+ # in the config, then restart the compute service
+
+ # only match the VF now
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.1",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # The compute fails to start as the new config would mean that the PF
+ # inventory is removed from the 0000:81:00.0 RP and VF inventory is
+ # added there instead, but the PF inventory has allocations. Keeping
+ # the old inventory as in
+ # test_device_reconfiguration_with_allocations_config_change_warn is
+ # not an option as it would result in two resource classes on the same
+ # RP, one for the PF and one for the VF. That would allow consuming
+ # the same physical device twice. Such dependent device configuration
+ # is intentionally not supported so we are stopping the compute
+ # service.
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.restart_compute_service,
+ hostname="compute1"
+ )
+ self.assertRegex(
+ str(ex),
+ "Failed to gather or report PCI resources to Placement: There was "
+ "a conflict when trying to complete your request.\n\n "
+ "update conflict: Inventory for 'CUSTOM_PCI_8086_1528' on "
+ "resource provider '.*' in use.",
+ )
+
+ def test_device_reconfiguration_with_allocations_hyp_change(self):
+ server, compute1_expected_placement_view = (
+ self._create_one_compute_with_a_pf_consumed_by_an_instance())
+
+ # restart the compute but simulate that the device 0000:81:00.0 is
+ # removed from the hypervisor while the device spec config is left
+ # intact. The PciTracker will notice this and log a warning. The
+ # placement tracking logic simply keeps the allocation intact in
+ # placement as both the PciDevice and the DeviceSpec are still
+ # available.
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=0, num_vfs=0)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # the warning from the PciTracker
+ self.assertIn(
+ "WARNING [nova.pci.manager] Unable to remove device with status "
+ "'allocated' and ownership %s because of PCI device "
+ "1:0000:81:00.0 is allocated instead of ['available', "
+ "'unavailable', 'unclaimable']. Check your [pci]device_spec "
+ "configuration to make sure this allocated device is whitelisted. "
+ "If you have removed the device from the whitelist intentionally "
+ "or the device is no longer available on the host you will need "
+ "to delete the server or migrate it to another host to silence "
+ "this warning."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+
+ def test_reporting_disabled_nothing_is_reported(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # Disable placement reporting so even if there are PCI devices on the
+ # hypervisor matching the [pci]device_spec config they are not reported
+ # to Placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
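+ # For reference, a minimal sketch of the operator-facing nova.conf
+ # that these reporting tests exercise (the option names are the real
+ # [pci] options set via self.flags() above, the values are
+ # illustrative only):
+ #
+ # [pci]
+ # device_spec = {"vendor_id": "8086", "product_id": "1533"}
+ # report_in_placement = True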
+ def test_reporting_cannot_be_disable_once_it_is_enabled(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # Try to disable placement reporting. The compute will refuse to start
+ # as there are already PCI device RPs in placement.
+ self.flags(group="pci", report_in_placement=False)
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.restart_compute_service,
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False,
+ )
+ self.assertIn(
+ "The [pci]report_in_placement is False but it was enabled before "
+ "on this compute. Nova does not support disabling it after it is "
+ "enabled.",
+ str(ex)
+ )
+
+
+class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ # Make migration succeed
+ self.useFixture(
+ fixtures.MockPatch(
+ "nova.virt.libvirt.driver.LibvirtDriver."
+ "migrate_disk_and_power_off",
+ new=mock.Mock(return_value='{}'),
+ )
+ )
+
+ def test_heal_single_pci_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute *without* PCI tracking in placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+
+ # Create an instance that consumes our PCI dev
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+
+ # Restart the compute but now with PCI tracking enabled
+ self.flags(group="pci", report_in_placement=True)
+ self.restart_compute_service("compute1")
+ # Assert that the PCI allocation is healed in placement
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 1}
+ },
+ "allocations": {
+ server['id']: {
+ "0000:81:00.0": {self.PCI_RC: 1}
+ }
+ }
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # run an update_available_resources periodic and assert that the usage
+ # and allocation stays
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_multiple_allocations(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with 4 type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=8)
+ # the config matches:
+ device_spec = self._to_list_of_json_str(
+ [
+ # both type-PCI
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ # the PF in slot 2
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ },
+ # the VFs in slot 3
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute *without* PCI tracking in placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ # 2 PCI + 1 PF + 4 VFs
+ self.assertPCIDeviceCounts("compute1", total=7, free=7)
+
+ # Create three instances consuming devices:
+ # * server_2pci: two type-PCI
+ # * server_pf_vf: one PF and one VF
+ # * server_2vf: two VFs
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2pci = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=5)
+
+ extra_spec = {"pci_passthrough:alias": "a-pf:1,a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_pf_vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=3)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=1)
+
+ # Restart the compute but now with PCI tracking enabled
+ self.flags(group="pci", report_in_placement=True)
+ self.restart_compute_service("compute1")
+ # Assert that the PCI allocation is healed in placement
+ self.assertPCIDeviceCounts("compute1", total=7, free=1)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 4},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ "0000:81:03.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 3},
+ },
+ "allocations": {
+ server_2pci['id']: {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ server_pf_vf['id']: {
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 1},
+ },
+ server_2vf['id']: {
+ "0000:81:03.0": {self.VF_RC: 2}
+ },
+ },
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # run an update_available_resources periodic and assert that the usage
+ # and allocation stays
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_partial_allocations(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with 4 type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=8)
+ # the config matches:
+ device_spec = self._to_list_of_json_str(
+ [
+ # both type-PCI
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ # the PF in slot 2
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ },
+ # the VFs in slot 3
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ # 2 PCI + 1 PF + 4 VFs
+ self.assertPCIDeviceCounts("compute1", total=7, free=7)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 4},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ "0000:81:03.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PF_RC: 0},
+ "0000:81:03.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # Create an instance consuming a VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=6)
+ # As scheduling does not support PCI in placement yet no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ expected_placement_view["usages"]["0000:81:03.0"][self.VF_RC] = 1
+ expected_placement_view["allocations"][server_vf["id"]] = {
+ "0000:81:03.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # Create another instance consuming two VFs
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=4)
+ # As scheduling does not support PCI in placement yet no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ expected_placement_view["usages"]["0000:81:03.0"][self.VF_RC] = 3
+ expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:03.0": {self.VF_RC: 2}
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_partial_allocations_during_resize_downsize(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF (slot 0) with 2 type-VFs
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming two VFs
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=2, free=0)
+ # As scheduling does not support PCI in placement yet no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 2
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 2}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Resize server to use only one VF
+
+ # Start a new compute with only one VF available
+ # The fake libvirt will emulate on the host:
+ # * one type-PF (slot 0) with 1 type-VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # the config matches just the VFs
+ compute2_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute2_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=1, free=1)
+ compute2_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+
+ self.assertPCIDeviceCounts("compute2", total=1, free=0)
+ # As scheduling does not support PCI in placement yet no allocation
+ # is created for the PCI consumption by the scheduler on the
+ # destination. BUT the resource tracker in the compute will heal the
+ # missing PCI allocation
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # the resize is not confirmed, so we expect that the source host
+ # still has PCI allocation in placement, but it is held by the
+ # migration UUID now.
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"], server['id'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # revert the resize
+ server = self._revert_resize(server)
+ # the dest host should be freed up
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute2_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # on the source host the allocation should be moved back from the
+ # migration UUID to the instance UUID
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"],
+ server['id'],
+ revert=True
+ )
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # resize again and this time confirm the resize
+ server = self._resize_server(server, flavor_id)
+ server = self._confirm_resize(server)
+ # the dest should have the allocation for the server
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # the source host should be freed
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute1_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ def test_heal_partial_allocations_during_resize_change_dev_type(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF (slot 0) with 1 type-VF
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ # As scheduling does not support PCI in placement yet no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Resize the instance to consume a PF and two PCI devs instead
+
+ # start a compute with enough devices for the resize
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI (slot 0, 1)
+ # * one type-PF (slot 2) with 1 type-VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=1, num_vfs=1)
+ # the config matches the PCI devs and the PF but not the VF
+ compute2_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:*",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute2_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=3, free=3)
+ compute2_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ # resize the server to consume a PF and two PCI devs instead
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2,a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+ server = self._confirm_resize(server)
+
+ # on the dest we have the new PCI allocations
+ self.assertPCIDeviceCounts("compute2", total=3, free=0)
+ compute2_expected_placement_view["usages"] = (
+ {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ }
+ )
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ # on the source the allocation is freed up
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute1_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ def test_heal_allocation_during_same_host_resize(self):
+ self.flags(allow_resize_to_same_host=True)
+ # The fake libvirt will emulate on the host:
+ # * one type-PF (slot 0) with 3 type-VFs
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=3)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=3, free=3)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 3},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # Create an instance consuming one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=3, free=2)
+ # As scheduling does not support PCI in placement yet no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # resize the server to consume 2 VFs on the same host
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+ # during resize both the source and the dest allocations are kept,
+ # and in a same host resize that means both are consumed from the
+ # same host
+ self.assertPCIDeviceCounts("compute1", total=3, free=0)
+ # the source side of the allocation is held by the migration
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"], server['id'])
+ # NOTE(gibi): we intentionally don't heal allocation for the instance
+ # while it is being resized. See the comment in the
+ # pci_placement_translator about the reasoning.
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # revert the resize
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=3, free=2)
+ # the original allocations are restored
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # now resize and then confirm it
+ self._resize_server(server, flavor_id)
+ self._confirm_resize(server)
+
+ # we expect that the consumption is according to the new flavor
+ self.assertPCIDeviceCounts("compute1", total=3, free=1)
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 2
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 2}
+ }
+ # NOTE(gibi): This is unfortunate but during a same host resize
+ # confirm, when PCI scheduling is not enabled, the healing logic
+ # cannot heal the dest host allocation during the claim. It will only
+ # heal it in the next run of ResourceTracker._update(). This is due
+ # to the fact that ResourceTracker.drop_move_claim runs both for
+ # revert (on the dest) and confirm (on the source) and in a same host
+ # resize this means that it runs on both the source and the dest as
+ # they are the same.
+ # Anyhow the healing will happen just a bit later. And the end goal is
+ # to make the scheduler support enabled by default and delete the
+ # whole healing logic. So I think this is acceptable.
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+
+@ddt.ddt
+class SimpleRCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "gpu",
+ "traits": ",".join(
+ [
+ os_traits.HW_GPU_API_VULKAN,
+ "purple",
+ "round",
+ ]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_GPU": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_PURPLE",
+ "CUSTOM_ROUND",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_GPU": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view)
+
+ @ddt.data(
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-gpu-wrong-rc",
+ },
+ {
+ "resource_class": os_resource_classes.PGPU,
+ "name": "a-gpu-wrong-rc-2",
+ },
+ {
+ "resource_class": "GPU",
+ # NOTE(gibi): "big" is missing from device spec
+ "traits": "purple,big",
+ "name": "a-gpu-missing-trait",
+ },
+ )
+ def test_boot_with_custom_rc_and_traits_no_matching_device(
+ self, pci_alias
+ ):
+ self.flags(group="pci", alias=self._to_list_of_json_str([pci_alias]))
+ extra_spec = {"pci_passthrough:alias": f"{pci_alias['name']}:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state="ERROR"
+ )
+ self.assertIn("fault", server)
+ self.assertIn("No valid host", server["fault"]["message"])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+
+ def test_boot_with_custom_rc_and_traits_succeeds(self):
+ pci_alias_gpu = {
+ "resource_class": "GPU",
+ "traits": "HW_GPU_API_VULKAN,PURPLE",
+ "name": "a-gpu",
+ }
+ self.flags(
+ group="pci", alias=self._to_list_of_json_str([pci_alias_gpu])
+ )
+
+ extra_spec = {"pci_passthrough:alias": "a-gpu:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ self.compute1_expected_placement_view["usages"]["0000:81:00.0"][
+ "CUSTOM_GPU"
+ ] = 1
+ self.compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {"CUSTOM_GPU": 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+ self.assert_no_pci_healing("compute1")
+
+
+class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_device_claim_consistent_with_placement_allocation(self):
+ """As soon as [filter_scheduler]pci_in_placement is enabled the
+ nova-scheduler will allocate PCI devices in placement. Then on the
+ nova-compute side the PCI claim will also allocate PCI devices in the
+ nova DB. This test will create a situation where the two allocation
+ could contradict and observes that in a contradicting situation the PCI
+ claim will fail instead of allocating a device that is not allocated in
+ placement.
+
+ For the contradiction to happen we need two PCI devices that looks
+ different from placement perspective than from the nova DB perspective.
+
+ We can do that by assigning different traits from in placement and
+ having different product_id in the Nova DB. Then we will create a
+ request that would match from placement perspective to one of the
+ device only and would match to the other device from nova DB
+ perspective. Then we will expect that the boot request fails with no
+ valid host.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ # * one type-PF in slot 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=0)
+ # we allow both devices to be consumed, but we assign different traits
+ # so we can selectively schedule to one of the devices in placement
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PCI",
+ },
+ {
+ "address": "0000:81:01.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PF",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 1},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_A_PCI",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_A_PF",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 0},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # now we create a PCI alias that cannot be fulfilled from both
+ # nova and placement perspective at the same time, but can be fulfilled
+ # from each perspective individually
+ pci_alias_no_match = {
+ "resource_class": "MY_DEV",
+ # by product_id this matches 81.00 only
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ # by trait this matches 81.01 only
+ "traits": "A_PF",
+ "name": "a-pci",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_no_match]),
+ )
+
+ # then try to boot with the alias and expect no valid host error
+ extra_spec = {"pci_passthrough:alias": "a-pci:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_vf_with_split_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # reserve VFs from 81.01 in placement to drive the first instance to
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 2)
+ # boot an instance with a single VF
+ # we expect that it is allocated from 81.00 as both VFs on 81.01 are
+ # reserved
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_1vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=3)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1}
+ }
+ compute1_expected_placement_view["allocations"][server_1vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ # Boot a second instance requesting two VFs and ensure that the only
+ # way that placement allows this is to split the two VFs between PFs.
+ # Let's remove the reservation of one resource from 81.01 so the only
+ # viable placement candidate is: one VF from 81.00 and one VF from
+ # 81.01
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 1)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ # both VMs consume one VF from 81.00
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ compute1_expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_3vfs_asymmetric_split_between_pfs(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # Boot an instance requesting three VFs. The 3 VFs can be split between
+ # the two PFs two ways: 2 from 81.00 and 1 from 81.01, or 1 from 81.00
+ # and 2 from 81.01.
+ # Let's block the first way in placement by reserving 1 device from
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:00.0", "CUSTOM_MY_VF", 1)
+ extra_spec = {"pci_passthrough:alias": "a-vf:3"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # We expect this to fit.
+ server_3vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ compute1_expected_placement_view["allocations"][server_3vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index df3e0468b9..6b8b254af9 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -14,6 +14,8 @@
# under the License.
import copy
+import pprint
+import typing as ty
from unittest import mock
from urllib import parse as urlparse
@@ -27,7 +29,9 @@ from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
import nova
+from nova.compute import pci_placement_translator
from nova import context
+from nova import exception
from nova.network import constants
from nova import objects
from nova.objects import fields
@@ -41,15 +45,65 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+class PciPlacementHealingFixture(fixtures.Fixture):
+ """Allow asserting if the pci_placement_translator module needed to
+ heal PCI allocations. Such healing is only normal during upgrade. After
+ every compute is upgraded and the scheduling support of PCI tracking in
+ placement is enabled there should be no need to heal PCI allocations in
+ the resource tracker. We assert this as we eventually want to remove the
+ automatic healing logic from the resource tracker.
+ """
+
+ def __init__(self):
+ super().__init__()
+ # a list of (nodename, result, allocation_before, allocation_after)
+ # tuples recording the result of the calls to
+ # update_provider_tree_for_pci
+ self.calls = []
+
+ def setUp(self):
+ super().setUp()
+
+ orig = pci_placement_translator.update_provider_tree_for_pci
+
+ def wrapped_update(
+ provider_tree, nodename, pci_tracker, allocations, same_host
+ ):
+ alloc_before = copy.deepcopy(allocations)
+ updated = orig(
+ provider_tree, nodename, pci_tracker, allocations, same_host)
+ alloc_after = copy.deepcopy(allocations)
+ self.calls.append((nodename, updated, alloc_before, alloc_after))
+ return updated
+
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ "nova.compute.pci_placement_translator."
+ "update_provider_tree_for_pci",
+ wrapped_update,
+ )
+ )
+
+ def last_healing(self, hostname: str) -> ty.Optional[ty.Tuple[dict, dict]]:
+ for h, updated, before, after in self.calls:
+ if h == hostname and updated:
+ return before, after
+ return None
+
+
class _PCIServersTestBase(base.ServersTestBase):
ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
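+ # the resource class nova reports for a PCI device when the
+ # device_spec does not override it: CUSTOM_PCI_<vendor_id>_<product_id>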
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+
def setUp(self):
self.ctxt = context.get_admin_context()
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=self.PCI_ALIAS,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
super(_PCIServersTestBase, self).setUp()
@@ -63,6 +117,9 @@ class _PCIServersTestBase(base.ServersTestBase):
'.PciPassthroughFilter.host_passes',
side_effect=host_pass_mock)).mock
+ self.pci_healing_fixture = self.useFixture(
+ PciPlacementHealingFixture())
+
def assertPCIDeviceCounts(self, hostname, total, free):
"""Ensure $hostname has $total devices, $free of which are free."""
devices = objects.PciDeviceList.get_by_compute_node(
@@ -72,6 +129,176 @@ class _PCIServersTestBase(base.ServersTestBase):
self.assertEqual(total, len(devices))
self.assertEqual(free, len([d for d in devices if d.is_available()]))
+ def assert_no_pci_healing(self, hostname):
+ last_healing = self.pci_healing_fixture.last_healing(hostname)
+ before = last_healing[0] if last_healing else None
+ after = last_healing[1] if last_healing else None
+ self.assertIsNone(
+ last_healing,
+ "The resource tracker needed to heal PCI allocation in placement "
+ "on host %s. This should not happen in normal operation as the "
+ "scheduler should create the proper allocation instead.\n"
+ "Allocations before healing:\n %s\n"
+ "Allocations after healing:\n %s\n"
+ % (
+ hostname,
+ pprint.pformat(before),
+ pprint.pformat(after),
+ ),
+ )
+
+ def _get_rp_by_name(self, name, rps):
+ for rp in rps:
+ if rp["name"] == name:
+ return rp
+ self.fail(f'RP {name} is not found in Placement {rps}')
+
+ def assert_placement_pci_inventory(self, hostname, inventories, traits):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ # rps also contains the root provider so we subtract 1
+ self.assertEqual(
+ len(inventories),
+ len(rps) - 1,
+ f"Number of RPs on {hostname} doesn't match. "
+ f"Expected {list(inventories)} actual {[rp['name'] for rp in rps]}"
+ )
+
+ for rp_name, inv in inventories.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ rp_inv = self._get_provider_inventory(rp['uuid'])
+
+ self.assertEqual(
+ len(inv),
+ len(rp_inv),
+ f"Number of inventories on {real_rp_name} are not as "
+ f"expected. Expected {inv}, actual {rp_inv}"
+ )
+ for rc, total in inv.items():
+ self.assertEqual(
+ total,
+ rp_inv[rc]["total"])
+ self.assertEqual(
+ total,
+ rp_inv[rc]["max_unit"])
+
+ rp_traits = self._get_provider_traits(rp['uuid'])
+ self.assertEqual(
+ # COMPUTE_MANAGED_PCI_DEVICE is automatically reported on
+ # PCI device RPs by nova
+ set(traits[rp_name]) | {"COMPUTE_MANAGED_PCI_DEVICE"},
+ set(rp_traits),
+ f"Traits on RP {real_rp_name} does not match with expectation"
+ )
+
+ def assert_placement_pci_usages(self, hostname, usages):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ for rp_name, usage in usages.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ rp_usage = self._get_provider_usages(rp['uuid'])
+ self.assertEqual(
+ usage,
+ rp_usage,
+ f"Usage on RP {real_rp_name} does not match with expectation"
+ )
+
+ def assert_placement_pci_allocations(self, allocations):
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ len(actual_allocations),
+ f"The consumer {consumer} allocates from different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ self.assertIn(
+ rp_uuid,
+ actual_allocations,
+ f"The consumer {consumer} expected to allocate from "
+ f"{rp_name}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp_uuid]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+ f"The consumer {consumer} expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_allocations_on_host(self, hostname, allocations):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ # actual_allocations also contains allocations against the
+ # root provider for VCPU, MEMORY_MB, and DISK_GB so subtract
+ # one
+ len(actual_allocations) - 1,
+ f"The consumer {consumer} allocates from different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ self.assertIn(
+ rp['uuid'],
+ actual_allocations,
+ f"The consumer {consumer} expected to allocate from "
+ f"{rp['uuid']}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp['uuid']]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+ f"The consumer {consumer} expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_view(
+ self, hostname, inventories, traits, usages=None, allocations=None
+ ):
+ if not usages:
+ usages = {}
+
+ if not allocations:
+ allocations = {}
+
+ self.assert_placement_pci_inventory(hostname, inventories, traits)
+ self.assert_placement_pci_usages(hostname, usages)
+ self.assert_placement_pci_allocations_on_host(hostname, allocations)
+
+ @staticmethod
+ def _to_list_of_json_str(list):
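+ # e.g. [{"product_id": "1528"}] -> ['{"product_id": "1528"}'], the
+ # list-of-JSON-strings form expected by the [pci]device_spec and
+ # [pci]alias config options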
+ return [jsonutils.dumps(x) for x in list]
+
+ @staticmethod
+ def _move_allocation(allocations, from_uuid, to_uuid):
+ allocations[to_uuid] = allocations[from_uuid]
+ del allocations[from_uuid]
+
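+ # During a move operation nova transfers the source host allocation
+ # from the instance UUID to the migration UUID (and moves it back on
+ # revert); this helper applies the same change to the expected
+ # placement view used in the assertions.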
+ def _move_server_allocation(self, allocations, server_uuid, revert=False):
+ migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
+ if revert:
+ self._move_allocation(allocations, migration_uuid, server_uuid)
+ else:
+ self._move_allocation(allocations, server_uuid, migration_uuid)
+
class _PCIServersWithMigrationTestBase(_PCIServersTestBase):
@@ -123,7 +350,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
VFS_ALIAS_NAME = 'vfs'
PFS_ALIAS_NAME = 'pfs'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -711,7 +938,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# start two compute services with differing PCI device inventory
source_pci_info = fakelibvirt.HostPCIDevicesInfo(
- num_pfs=2, num_vfs=8, numa_node=0)
+ num_pfs=1, num_vfs=4, numa_node=0)
# add an extra PF without VF to be used by direct-physical ports
source_pci_info.add_device(
dev_type='PF',
@@ -764,7 +991,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# our source host should have marked two PCI devices as used, the VF
# and the parent PF, while the future destination is currently unused
self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
- self.assertPCIDeviceCounts('test_compute0', total=11, free=8)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=3)
self.assertPCIDeviceCounts('test_compute1', total=4, free=4)
# the instance should be on host NUMA node 0, since that's where our
@@ -788,7 +1015,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# TODO(stephenfin): Stop relying on a side-effect of how nova
# chooses from multiple PCI devices (apparently the last
# matching one)
- 'pci_slot': '0000:81:01.4',
+ 'pci_slot': '0000:81:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
@@ -812,7 +1039,7 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# we should now have transitioned our usage to the destination, freeing
# up the source in the process
- self.assertPCIDeviceCounts('test_compute0', total=11, free=11)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=6)
self.assertPCIDeviceCounts('test_compute1', total=4, free=1)
# the instance should now be on host NUMA node 1, since that's where
@@ -908,11 +1135,8 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# Disable SRIOV capabilities in PF and delete the VFs
self._disable_sriov_in_pf(pci_info_no_sriov)
- fake_connection = self._get_connection(pci_info=pci_info_no_sriov,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute('test_compute0', pci_info=pci_info_no_sriov)
+ self.compute = self.computes['test_compute0']
ctxt = context.get_admin_context()
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -924,13 +1148,9 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
self.assertEqual(1, len(pci_devices))
self.assertEqual('type-PCI', pci_devices[0].dev_type)
- # Update connection with original pci info with sriov PFs
- fake_connection = self._get_connection(pci_info=pci_info,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- # Restart the compute service
- self.restart_compute_service(self.compute)
+ # Restart the compute service with sriov PFs
+ self.restart_compute_service(
+ self.compute.host, pci_info=pci_info, keep_hypervisor_state=False)
# Verify if PCI devices are of type type-PF or type-VF
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -952,12 +1172,92 @@ class SRIOVServersTest(_PCIServersWithMigrationTestBase):
],
)
+ def test_change_bound_port_vnic_type_kills_compute_at_restart(self):
+ """Create a server with a direct port and change the vnic_type of the
+ bound port to macvtap. Then restart the compute service.
+
+ As the vnic_type is changed on the port but the vif_type is hwveb
+ instead of macvtap, the vif plug logic will try to look up the netdev
+ of the parent VF. However, that VF is consumed by the instance so the
+ netdev does not exist. This causes the compute service to fail with
+ an exception during startup.
+ """
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ self.start_compute(pci_info=pci_info)
+
+ # create a direct port
+ port = self.neutron.network_4_port_1
+ self.neutron.create_port({'port': port})
+
+ # create a server using the VF via neutron
+ server = self._create_server(networks=[{'port': port['id']}])
+
+ # update the vnic_type of the port in neutron
+ port = copy.deepcopy(port)
+ port['binding:vnic_type'] = 'macvtap'
+ self.neutron.update_port(port['id'], {"port": port})
+
+ compute = self.computes['compute1']
+
+ # Force an update on the instance info cache to ensure nova gets the
+ # information about the updated port
+ with context.target_cell(
+ context.get_admin_context(),
+ self.host_mappings['compute1'].cell_mapping
+ ) as cctxt:
+ compute.manager._heal_instance_info_cache(cctxt)
+ self.assertIn(
+ 'The vnic_type of the bound port %s has been changed in '
+ 'neutron from "direct" to "macvtap". Changing vnic_type of a '
+ 'bound port is not supported by Nova. To avoid breaking the '
+ 'connectivity of the instance please change the port '
+ 'vnic_type back to "direct".' % port['id'],
+ self.stdlog.logger.output,
+ )
+
+ def fake_get_ifname_by_pci_address(pci_addr: str, pf_interface=False):
+ # we want to fail the netdev lookup only if the pci_address is
+ # already consumed by our instance. So we look into the instance
+ # definition to see if the device is attached to the instance as a VF
+ conn = compute.manager.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(server['id'])
+ dev = dom._def['devices']['nics'][0]
+ lookup_addr = pci_addr.replace(':', '_').replace('.', '_')
+ if (
+ dev['type'] == 'hostdev' and
+ dev['source'] == 'pci_' + lookup_addr
+ ):
+ # nova tried to look up the netdev of an already consumed VF.
+ # So we have to fail
+ raise exception.PciDeviceNotFoundById(id=pci_addr)
+
+ # We need to simulate the actual failure manually as in our functional
+ # environment all PCI lookups are mocked. In reality nova tries to
+ # look up the netdev of the pci device on the host used by the port as
+ # the parent of the macvtap. However, as the originally direct port is
+ # bound to the instance, the VF pci device is already consumed by the
+ # instance and therefore there is no netdev for the VF.
+ self.libvirt.mock_get_ifname_by_pci_address.side_effect = (
+ fake_get_ifname_by_pci_address
+ )
+ # Nova cannot prevent the vnic_type change on a bound port. Neutron
+ # should prevent that instead. But the nova-compute should still
+ # be able to start up and only log an ERROR for this instance in an
+ # inconsistent state.
+ self.restart_compute_service('compute1')
+ self.assertIn(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+ 'support such change.',
+ self.stdlog.logger.output,
+ )
+
class SRIOVAttachDetachTest(_PCIServersTestBase):
# no need for aliases as these test will request SRIOV via neutron
PCI_ALIAS = []
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -1015,10 +1315,9 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2)
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
- fake_connection = self._get_connection(host_info, pci_info)
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute(
+ 'test_compute0', host_info=host_info, pci_info=pci_info)
+ self.compute = self.computes['test_compute0']
# Create server with a port
server = self._create_server(networks=[{'port': first_port_id}])
@@ -1068,7 +1367,7 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
self.neutron.sriov_pf_port2['id'])
-class VDPAServersTest(_PCIServersTestBase):
+class VDPAServersTest(_PCIServersWithMigrationTestBase):
# this is needed for os_compute_api:os-migrate-server:migrate policy
ADMIN_API = True
@@ -1077,7 +1376,7 @@ class VDPAServersTest(_PCIServersTestBase):
# Whitelist both the PF and VF; in reality, you probably wouldn't do this
# but we want to make sure that the PF is correctly taken off the table
# once any VF is used
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': '15b3',
'product_id': '101d',
@@ -1100,14 +1399,13 @@ class VDPAServersTest(_PCIServersTestBase):
def setUp(self):
super().setUp()
-
# The ultimate base class _IntegratedTestBase uses NeutronFixture but
# we need a bit more intelligent neutron for these tests. Applying the
# new fixture here means that we re-stub what the previous neutron
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
- def start_compute(self):
+ def start_vdpa_compute(self, hostname='compute-0'):
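+ # Helper to start a compute whose fake PCI PFs/VFs also expose vDPA
+ # child devices (backed by the mlx5_core driver in the fixture).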
vf_ratio = self.NUM_VFS // self.NUM_PFS
pci_info = fakelibvirt.HostPCIDevicesInfo(
@@ -1145,7 +1443,7 @@ class VDPAServersTest(_PCIServersTestBase):
driver_name='mlx5_core')
vdpa_info.add_device(f'vdpa_vdpa{idx}', idx, vf)
- return super().start_compute(
+ return super().start_compute(hostname=hostname,
pci_info=pci_info, vdpa_info=vdpa_info,
libvirt_version=self.FAKE_LIBVIRT_VERSION,
qemu_version=self.FAKE_QEMU_VERSION)
@@ -1186,7 +1484,6 @@ class VDPAServersTest(_PCIServersTestBase):
expected = """
<interface type="vdpa">
<mac address="b5:bc:2e:e7:51:ee"/>
- <model type="virtio"/>
<source dev="/dev/vhost-vdpa-3"/>
</interface>"""
actual = etree.tostring(elem, encoding='unicode')
@@ -1200,7 +1497,7 @@ class VDPAServersTest(_PCIServersTestBase):
fake_create,
)
- hostname = self.start_compute()
+ hostname = self.start_vdpa_compute()
num_pci = self.NUM_PFS + self.NUM_VFS
# both the PF and VF with vDPA capabilities (dev_type=vdpa) should have
@@ -1233,12 +1530,16 @@ class VDPAServersTest(_PCIServersTestBase):
port['binding:profile'],
)
- def _test_common(self, op, *args, **kwargs):
- self.start_compute()
-
+ def _create_port_and_server(self):
# create the port and a server, with the port attached to the server
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks=[{'port': vdpa_port['id']}])
+ return vdpa_port, server
+
+ def _test_common(self, op, *args, **kwargs):
+ self.start_vdpa_compute()
+
+ vdpa_port, server = self._create_port_and_server()
# attempt the unsupported action and ensure it fails
ex = self.assertRaises(
@@ -1248,40 +1549,384 @@ class VDPAServersTest(_PCIServersTestBase):
'not supported for instance with vDPA ports',
ex.response.text)
- def test_attach_interface(self):
- self.start_compute()
+ def test_attach_interface_service_version_61(self):
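+ # While the minimum compute service version is pinned to 61 the
+ # interface attach is still expected to be rejected as unsupported
+ # for instances with vDPA ports.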
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=61
+ ):
+ self._test_common(self._attach_interface, uuids.vdpa_port)
+ def test_attach_interface(self):
+ hostname = self.start_vdpa_compute()
# create the port and a server, but don't attach the port to the server
# yet
- vdpa_port = self.create_vdpa_port()
server = self._create_server(networks='none')
-
+ vdpa_port = self.create_vdpa_port()
# attempt to attach the port to the server
- ex = self.assertRaises(
- client.OpenStackApiException,
- self._attach_interface, server, vdpa_port['id'])
- self.assertIn(
- 'not supported for instance with vDPA ports',
- ex.response.text)
+ self._attach_interface(server, vdpa_port['id'])
+ # ensure the binding details sent to "neutron" were correct
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:06:00.4',
+ 'physical_network': 'physnet4',
+ },
+ port['binding:profile'],
+ )
+ self.assertEqual(hostname, port['binding:host_id'])
+ self.assertEqual(server['id'], port['device_id'])
+
+ def test_detach_interface_service_version_61(self):
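+ # Like attach, detach is still expected to be rejected as unsupported
+ # while the minimum compute service version is pinned to 61.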
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=61
+ ):
+ self._test_common(self._detach_interface, uuids.vdpa_port)
def test_detach_interface(self):
- self._test_common(self._detach_interface, uuids.vdpa_port)
+ self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # ensure the binding details sent to "neutron" were correct
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self._detach_interface(server, vdpa_port['id'])
+ # ensure the port is no longer owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual('', port['device_id'])
+ self.assertEqual({}, port['binding:profile'])
- def test_shelve(self):
- self._test_common(self._shelve_server)
+ def test_shelve_offload(self):
+ hostname = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # assert the port is bound to the vm and the compute host
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self.assertEqual(hostname, port['binding:host_id'])
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ # -2 as we claim the vdpa device, which makes the parent PF unavailable
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ server = self._shelve_server(server)
+ # now that the vm is shelve offloaded it should not be bound
+ # to any host but should still be owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ # FIXME(sean-k-mooney): we should be unbinding the port from
+ # the host when we shelve offload but we don't today.
+ # This is unrelated to vdpa port and is a general issue.
+ self.assertEqual(hostname, port['binding:host_id'])
+ self.assertIn('binding:profile', port)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
- def test_suspend(self):
- self._test_common(self._suspend_server)
+ def test_unshelve_to_same_host(self):
+ hostname = self.start_vdpa_compute()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
- def test_evacuate(self):
- self._test_common(self._evacuate_server)
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
- def test_resize(self):
- flavor_id = self._create_flavor()
- self._test_common(self._resize_server, flavor_id)
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ # FIXME(sean-k-mooney): shelve offload should unbind the port
+ # self.assertEqual('', port['binding:host_id'])
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ server = self._unshelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ def test_unshelve_to_different_host(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ # FIXME(sean-k-mooney): shelve should unbind the port
+ # self.assertEqual('', port['binding:host_id'])
+ self.assertEqual(source, port['binding:host_id'])
+
+ # force the unshelve to the other host
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._unshelve_server(server)
+ # the dest devices should be claimed
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ # and the source host devices should still be free
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
+
+ def test_evacuate(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ # stop the source compute and enable the dest
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.computes['source'].stop()
+ # Down the source compute to enable the evacuation
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'forced_down': True})
+
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._evacuate_server(server)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
+
+ # as the source compute is offline the pci claims will not be cleaned
+ # up on the source compute.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # but if you fix/restart the source node the allocations for evacuated
+ # instances should be released.
+ self.restart_compute_service(source)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+
+ def test_resize_same_host(self):
+ self.flags(allow_resize_to_same_host=True)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ source = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # before the resize the vm should be using 1 VF, but that also marks
+ # the PF as unavailable, so we assert 2 devices are in use.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ # while the resize is awaiting confirmation the VF claims are doubled
+ # even for a same host resize, so assert that 3 devices are in use:
+ # 1 PF and 2 VFs.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 3)
+ server = self._confirm_resize(server)
+ # but once we confirm it should be reduced back to 1 PF and 1 VF
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # assert the hostname has not changed as part of the resize
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_different_host(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_revert(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ # while the resize is awaiting confirmation both the dest and source
+ # pci claims should be present.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._revert_resize(server)
+ # but once we revert the dest claims should be freed.
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
def test_cold_migrate(self):
- self._test_common(self._migrate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # enable the dest; we do not need to disable the source since cold
+ # migration won't happen to the same host in the libvirt driver
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._migrate_server(server)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_suspend_and_resume_service_version_62(self):
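+ # With the minimum compute service version pinned to 62, suspend is
+ # still expected to be rejected as unsupported for vDPA instances.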
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=62
+ ):
+ self._test_common(self._suspend_server)
+
+ def test_suspend_and_resume(self):
+ source = self.start_vdpa_compute(hostname='source')
+ vdpa_port, server = self._create_port_and_server()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ server = self._suspend_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual('SUSPENDED', server['status'])
+ server = self._resume_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual('ACTIVE', server['status'])
+
+ def test_live_migrate_service_version_62(self):
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=62
+ ):
+ self._test_common(self._live_migrate)
+
+ def test_live_migrate(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # enable the dest; we do not need to disable the source since live
+ # migration won't happen to the same host in the libvirt driver
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+
+ with mock.patch(
+ 'nova.virt.libvirt.LibvirtDriver.'
+ '_detach_direct_passthrough_vifs'
+ ):
+ server = self._live_migrate(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
class PCIServersTest(_PCIServersTestBase):
@@ -1290,7 +1935,7 @@ class PCIServersTest(_PCIServersTestBase):
microversion = 'latest'
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1304,9 +1949,15 @@ class PCIServersTest(_PCIServersTestBase):
}
)]
+ def setUp(self):
+ super().setUp()
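+ # Enable PCI tracking in placement and let the scheduler use the
+ # placement PCI inventories for these tests.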
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Verifies that an instance can be booted with cpu pinning and with an
- assigned pci device.
+ assigned pci device when using the legacy NUMA affinity policy and
+ the pci device reports numa info.
"""
self.flags(cpu_dedicated_set='0-7', group='compute')
@@ -1314,6 +1965,13 @@ class PCIServersTest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
self.start_compute(pci_info=pci_info)
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 0}},
+ )
+
# create a flavor
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1321,18 +1979,35 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(flavor_id=flavor_id, networks='none')
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 1}},
+ allocations={server['id']: {"0000:81:00.0": {self.PCI_RC: 1}}},
+ )
+ self.assert_no_pci_healing("compute1")
def test_create_server_with_pci_dev_and_numa_fails(self):
"""This test ensures that it is not possible to allocated CPU and
- memory resources from one NUMA node and a PCI device from another.
+ memory resources from one NUMA node and a PCI device from another
+ if we use the legacy policy and the pci device reports numa info.
"""
-
self.flags(cpu_dedicated_set='0-7', group='compute')
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {'hw:cpu_policy': 'dedicated'}
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
@@ -1344,6 +2019,10 @@ class PCIServersTest(_PCIServersTestBase):
self._create_server(
flavor_id=flavor_id, networks='none', expected_state='ERROR')
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
def test_live_migrate_server_with_pci(self):
"""Live migrate an instance with a PCI passthrough device.
@@ -1355,14 +2034,42 @@ class PCIServersTest(_PCIServersTestBase):
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(
hostname='test_compute1',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# create a server
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute0")
+
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now live migrate that server
ex = self.assertRaises(
@@ -1374,28 +2081,400 @@ class PCIServersTest(_PCIServersTestBase):
# this will bubble to the API
self.assertEqual(500, ex.response.status_code)
self.assertIn('NoValidHost', str(ex))
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_resize_pci_to_vanilla(self):
# Start two computes, one with PCI and one without.
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# Boot a server with a single PCI device.
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# Resize it to a flavor without PCI devices. We expect this to work, as
# test_compute1 is available.
- # FIXME(artom) This is bug 1941005.
flavor_id = self._create_flavor()
- ex = self.assertRaises(client.OpenStackApiException,
- self._resize_server, server, flavor_id)
- self.assertEqual(500, ex.response.status_code)
- self.assertIn('NoValidHost', str(ex))
- # self._confirm_resize(server)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_vanilla_to_pci(self):
+ """Resize an instance from a non PCI flavor to a PCI flavor"""
+ # Start two computes, one with PCI and one without.
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Boot a server without PCI device and make sure it lands on the
+ # compute that has no device, so we can resize it later to the other
+ # host that has the PCI device.
+ extra_spec = {}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute1")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Resize it to a flavor with a PCI device. We expect this to work, as
+ # test_compute0 is available and has PCI devices.
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_from_one_dev_to_two(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=2),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize the server to a flavor requesting two devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the source host the PCI allocation is now held by the migration
+ self._move_server_allocation(
+ test_compute0_placement_pci_view['allocations'], server['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # on the dest we now have two devices allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now revert the resize
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the host the allocation should move back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # so the dest should be freed
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ del test_compute1_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now resize again and confirm it
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ # the source host now needs to be freed up
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # and dest allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_same_host_resize_with_pci(self):
+ """Start a single compute with 3 PCI devs and resize and instance
+ from one dev to two devs
+ """
+ self.flags(allow_resize_to_same_host=True)
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # Boot a server with a single PCI device.
+ # To stabilize the test we reserve 81.01 and 81.02 in placement so
+ # we can be sure that the instance will use 81.00, otherwise the
+ # allocation will be random between 00, 01, and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 1)
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # remove the reservations, so we can resize on the same host and
+ # consume 01 and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 0)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 0)
+
+ # Resize the server to use 2 PCI devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=0)
+ # the source host side of the allocation is now held by the migration
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server['id'])
+ # but we have the dest host side of the allocations on the same host
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # revert the resize so the instance should go back to use a single
+ # device
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ # the migration allocation is moved back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ # and the "dest" side of the allocation is dropped
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize again but now confirm the same host resize and assert that
+ # only the new flavor usage remains
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {self.PCI_RC: 1}
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_no_pci_healing("test_compute0")
def _confirm_resize(self, server, host='host1'):
# NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
@@ -1410,7 +2489,6 @@ class PCIServersTest(_PCIServersTestBase):
self.flags(host=orig_host)
def test_cold_migrate_server_with_pci(self):
-
host_devices = {}
orig_create = nova.virt.libvirt.guest.Guest.create
@@ -1439,6 +2517,41 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
self.start_compute(hostname=hostname, pci_info=pci_info)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# boot an instance with a PCI device on each host
extra_spec = {
@@ -1446,8 +2559,16 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # force the allocation on test_compute0 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
server_a = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute0')
+ # force the allocation on test_compute1 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 1)
server_b = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute1')
@@ -1459,6 +2580,25 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
self.assertPCIDeviceCounts(hostname, total=2, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_b['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # remove the resource reservation from test_compute1 to be able to
+ # migrate server_a there
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 0)
+
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
@@ -1476,19 +2616,390 @@ class PCIServersTest(_PCIServersTestBase):
server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
)
self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
+ # on the source host the allocation is now held by the migration UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server_a['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ # server_a now has an allocation on test_compute1 on 81:01
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:01.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:01.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now, confirm the migration and check our counts once again
self._confirm_resize(server_a)
self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
+ # the source host now has no allocations as the migration allocation
+ # is removed by confirm resize
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_request_two_pci_but_host_has_one(self):
+ # simulate a single type-PCI device on the host
+ self.start_compute(pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('compute1', total=1, free=1)
+
+ alias = [jsonutils.dumps(x) for x in (
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': 'a1',
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': 'a2',
+ },
+ )]
+ self.flags(group='pci', alias=alias)
+ # request two PCI devices; both individually match the single
+ # available device on the host
+ extra_spec = {'pci_passthrough:alias': 'a1:1,a2:1'}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # so we expect the boot to fail with a no valid host error as only
+ # one of the requested PCI devices can be allocated
+ server = self._create_server(
+ flavor_id=flavor_id, networks="none", expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+
+ def _create_two_computes(self):
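+ # Start two computes with one PCI device each and return the expected
+ # placement PCI view of both hosts.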
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ return (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def _create_two_computes_and_an_instance_on_the_first(self):
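+ # Same as _create_two_computes but also boots a single PCI dev server
+ # pinned to test_compute0 and returns it along with the views.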
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ return (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def test_evacuate(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # kill test_compute0 and evacuate the instance
+ self.computes['test_compute0'].stop()
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"forced_down": True},
+ )
+ self._evacuate_server(server)
+ # source allocation should be kept as source is dead but the server
+ # now has allocations on both hosts as evacuation does not use migration
+ # allocations.
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assert_placement_pci_inventory(
+ "test_compute0",
+ test_compute0_placement_pci_view["inventories"],
+ test_compute0_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute0", test_compute0_placement_pci_view["usages"]
+ )
+ self.assert_placement_pci_allocations(
+ {
+ server['id']: {
+ "test_compute0": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute0_0000:81:00.0": {self.PCI_RC: 1},
+ "test_compute1": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute1_0000:81:00.0": {self.PCI_RC: 1},
+ },
+ }
+ )
+
+ # dest allocation should be created
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_inventory(
+ "test_compute1",
+ test_compute1_placement_pci_view["inventories"],
+ test_compute1_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute1", test_compute0_placement_pci_view["usages"]
+ )
+
+ # recover test_compute0 and check that it is cleaned
+ self.restart_compute_service('test_compute0')
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # and test_compute1 is not changed (except that the instance now has
+ # its only allocation on this compute)
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_unshelve_after_offload(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # shelve offload the server
+ self._shelve_server(server)
+
+ # source allocation should be freed
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should not be touched
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # disable test_compute0 and unshelve the instance
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"status": "disabled"},
+ )
+ self._unshelve_server(server)
+
+ # test_compute0 should be unchanged
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should be allocated
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_reschedule(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # try to boot a VM with a single device but inject a fault on the first
+ # compute so that the VM is re-scheduled to the other one
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+
+ calls = []
+ orig_guest_create = (
+ nova.virt.libvirt.driver.LibvirtDriver._create_guest)
+
+ def fake_guest_create(*args, **kwargs):
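+ # fail only the very first guest creation so the boot is re-scheduled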
+ if not calls:
+ calls.append(1)
+ raise fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ )
+ else:
+ return orig_guest_create(*args, **kwargs)
+
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._create_guest',
+ new=fake_guest_create
+ ):
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none')
+
+ compute_pci_view_map = {
+ 'test_compute0': test_compute0_placement_pci_view,
+ 'test_compute1': test_compute1_placement_pci_view,
+ }
+ allocated_compute = server['OS-EXT-SRV-ATTR:host']
+ not_allocated_compute = (
+ "test_compute0"
+ if allocated_compute == "test_compute1"
+ else "test_compute1"
+ )
+
+ allocated_pci_view = compute_pci_view_map.pop(
+ server['OS-EXT-SRV-ATTR:host'])
+ not_allocated_pci_view = list(compute_pci_view_map.values())[0]
+
+ self.assertPCIDeviceCounts(allocated_compute, total=1, free=0)
+ allocated_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ allocated_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(allocated_compute, **allocated_pci_view)
+
+ self.assertPCIDeviceCounts(not_allocated_compute, total=1, free=1)
+ self.assert_placement_pci_view(
+ not_allocated_compute, **not_allocated_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_multi_create(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ body = self._build_server(flavor_id=pci_flavor_id, networks='none')
+ body.update(
+ {
+ "min_count": "2",
+ }
+ )
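+ # min_count=2 makes this a multi-create request, so two servers are
+ # built from the single API call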
+ self.api.post_server({'server': body})
+
+ servers = self.api.get_servers(detail=False)
+ for server in servers:
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ self.assertEqual(2, len(servers))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ # we have no way to influence which instance takes which device, so
+ # we need to look at the nova DB to properly assert the placement
+ # allocation
+ devices = objects.PciDeviceList.get_by_compute_node(
+ self.ctxt,
+ objects.ComputeNode.get_by_nodename(self.ctxt, 'test_compute0').id,
+ )
+ for dev in devices:
+ if dev.instance_uuid:
+ test_compute0_placement_pci_view["usages"][
+ dev.address][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ dev.instance_uuid] = {dev.address: {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1505,6 +3016,11 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
)]
expected_state = 'ACTIVE'
+ def setUp(self):
+ super().setUp()
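+ # enable PCI tracking in Placement so these NUMA policy tests also
+ # verify the placement PCI view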
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Validate behavior of 'preferred' PCI NUMA policy.
@@ -1517,6 +3033,20 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {
@@ -1525,13 +3055,26 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
self._create_server(flavor_id=flavor_id)
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# now boot one with a PCI device, which should succeed thanks to the
# use of the PCI policy
extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(
+ server_with_pci = self._create_server(
flavor_id=flavor_id, expected_state=self.expected_state)
+ if self.expected_state == 'ACTIVE':
+ compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ compute1_placement_pci_view["allocations"][
+ server_with_pci['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
@@ -1547,12 +3090,105 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
)]
expected_state = 'ERROR'
+ def setUp(self):
+ super().setUp()
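+ # don't let the fake PCI devices be probed via the host's real sysfs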
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.is_physical_function', return_value=False
+ )
+ )
+
+ def test_create_server_with_pci_dev_and_numa_placement_conflict(self):
+ # fakelibvirt will simulate the devices:
+ # * one type-PCI in 81.00 on numa 0
+ # * one type-PCI in 81.01 on numa 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the device_spec will assign different traits to 81.00 than 81.01
+ # so the two devices become different from placement perspective
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": "green",
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:01.0",
+ "traits": "red",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # both numa 0 and numa 1 have 4 PCPUs
+ self.flags(cpu_dedicated_set='0-7', group='compute')
+ self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": ["CUSTOM_GREEN"],
+ "0000:81:01.0": ["CUSTOM_RED"],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
+ # boot one instance with no PCI device to "fill up" NUMA node 0, so
+ # that free PCPUs remain only on numa 1 while PCI devices are
+ # available on both nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ }
+ flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+
+ pci_alias = {
+ "resource_class": self.PCI_RC,
+ # this means only 81.00, which is on numa 0, will match in placement
+ "traits": "green",
+ "name": "pci-dev",
+ # this forces the scheduler to only accept a solution where the
+ # PCI device is on the same numa node as the pinned CPUs
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias]),
+ )
+
+ # Ask for dedicated CPUs, which can only be fulfilled on numa 1.
+ # And ask for a PCI alias that can only be fulfilled on numa 0 due to
+ # trait request.
+ # We expect that this makes the scheduling fail.
+ extra_spec = {
+ "hw:cpu_policy": "dedicated",
+ "pci_passthrough:alias": "pci-dev:1",
+ }
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, expected_state="ERROR")
+
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
@ddt.ddt
class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1657,9 +3293,11 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
}
)]
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=alias,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=alias,
+ group='pci'
+ )
self._test_policy(pci_numa_node, status, 'required')
@@ -1737,7 +3375,7 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -1875,9 +3513,11 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
}
)]
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=alias,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=alias,
+ group='pci'
+ )
self._test_policy(pci_numa_node, status, 'required')
@@ -1973,7 +3613,7 @@ class RemoteManagedServersTest(_PCIServersWithMigrationTestBase):
ADMIN_API = True
microversion = 'latest'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
# A PF with access to physnet4.
{
'vendor_id': '15b3',
diff --git a/nova/tests/functional/libvirt/test_power_manage.py b/nova/tests/functional/libvirt/test_power_manage.py
new file mode 100644
index 0000000000..fb1ac7d0cd
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_power_manage.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import fixtures
+
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import base
+from nova.virt import hardware
+from nova.virt.libvirt import cpu
+
+
+class PowerManagementTestsBase(base.ServersTestBase):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter']
+
+ ADMIN_API = True
+
+ def setUp(self):
+ super(PowerManagementTestsBase, self).setUp()
+
+ self.ctxt = nova_context.get_admin_context()
+
+ # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect
+ # this
+ host_manager = self.scheduler.manager.host_manager
+ numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
+ host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
+ _p = mock.patch('nova.scheduler.filters'
+ '.numa_topology_filter.NUMATopologyFilter.host_passes',
+ side_effect=host_pass_mock)
+ self.mock_filter = _p.start()
+ self.addCleanup(_p.stop)
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.useFixture(nova_fixtures.PrivsepFixture())
+
+ # Defining the main flavor for 4 vCPUs all pinned
+ self.extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_thread_policy': 'prefer',
+ }
+ self.pcpu_flavor_id = self._create_flavor(
+ vcpu=4, extra_spec=self.extra_spec)
+
+ def _assert_server_cpus_state(self, server, expected='online'):
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ if not inst.numa_topology:
+ self.fail('Instance should have a NUMA topology in order to know '
+ 'its physical CPUs')
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ self._assert_cpu_set_state(instance_pcpus, expected=expected)
+ return instance_pcpus
+
+ def _assert_cpu_set_state(self, cpu_set, expected='online'):
+ for i in cpu_set:
+ core = cpu.Core(i)
+ if expected == 'online':
+ self.assertTrue(core.online, f'{i} is not online')
+ elif expected == 'offline':
+ self.assertFalse(core.online, f'{i} is online')
+ elif expected == 'powersave':
+ self.assertEqual('powersave', core.governor)
+ elif expected == 'performance':
+ self.assertEqual('performance', core.governor)
+
+
+class PowerManagementTests(PowerManagementTestsBase):
+ """Test suite for a single host with 9 dedicated cores and 1 used for OS"""
+
+ def setUp(self):
+ super(PowerManagementTests, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
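+ # with the default 'cpu_state' strategy, dedicated cores stay offline
+ # until an instance pins them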
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # All cores are shut down at startup; let's check.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ def test_hardstop_compute_service_if_wrong_opt(self):
+ self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.assertRaises(exception.InvalidConfiguration,
+ self.start_compute, host_info=self.host_info,
+ hostname='compute2')
+
+ def test_create_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Let's verify that the pinned CPUs are now online
+ self._assert_server_cpus_state(server, expected='online')
+
+ # Verify that the unused CPUs are still offline
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ unused_cpus = cpu_dedicated_set - instance_pcpus
+ self._assert_cpu_set_state(unused_cpus, expected='offline')
+
+ def test_stop_start_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+
+ server = self._stop_server(server)
+ # Let's verify that the pinned CPUs are now stopped...
+ self._assert_server_cpus_state(server, expected='offline')
+
+ server = self._start_server(server)
+ # ...and now, they should be back.
+ self._assert_server_cpus_state(server, expected='online')
+
+ def test_resize(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ server_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+
+ new_flavor_id = self._create_flavor(
+ vcpu=5, extra_spec=self.extra_spec)
+ self._resize_server(server, new_flavor_id)
+ server2_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+ # Even if the resize is not confirmed yet, the original guest is now
+ # destroyed so the cores are now offline.
+ self._assert_cpu_set_state(server_pcpus, expected='offline')
+
+ # let's revert the resize
+ self._revert_resize(server)
+ # So now the original CPUs will be online again, while the cores from
+ # the reverted resize should be back offline.
+ self._assert_cpu_set_state(server_pcpus, expected='online')
+ self._assert_cpu_set_state(server2_pcpus, expected='offline')
+
+ def test_changing_strategy_fails(self):
+ # As a reminder, all cores have been shut down before.
+ # Now we want to change the strategy and then restart the service.
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ # See, this is not possible as we would have offline CPUs.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementTestsGovernor(PowerManagementTestsBase):
+ """Test suite for speific governor usage (same 10-core host)"""
+
+ def setUp(self):
+ super(PowerManagementTestsGovernor, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ def test_create(self):
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ # With the governor strategy, cores are still online but run with a
+ # powersave governor.
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='powersave')
+
+ # Now, start an instance
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Once the pinned cores are in use, their governor is set to performance
+ self._assert_server_cpus_state(server, expected='performance')
+
+ def test_changing_strategy_fails(self):
+ # Arbitrarily set a core governor strategy to be performance
+ cpu.Core(1).set_high_governor()
+ # and then forget about it while changing the strategy.
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ # This time, this wouldn't be acceptable as some core would keep a
+ # different governor while Nova would only online/offline it.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementMixedInstances(PowerManagementTestsBase):
+ """Test suite for a single host with 6 dedicated cores, 3 shared and one
+ OS-restricted.
+ """
+
+ def setUp(self):
+ super(PowerManagementMixedInstances, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining 6 CPUs to be dedicated, not all of them contiguous.
+ self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # Make sure only the 6 dedicated cores are offline now
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ # cores 4 and 8-9 should be online
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ def test_standard_server_works_and_passes(self):
+
+ std_flavor_id = self._create_flavor(vcpu=2)
+ self._create_server(flavor_id=std_flavor_id, expected_state='ACTIVE')
+
+ # Since this is an instance with floating vCPUs on the shared set, we
+ # can only look up the host CPUs and see they haven't changed state.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ # We can now try to boot an instance with pinned CPUs to test the mix
+ pinned_server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # We'll see that its CPUs are now online
+ self._assert_server_cpus_state(pinned_server, expected='online')
+ # but it doesn't change the shared set
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
diff --git a/nova/tests/functional/libvirt/test_report_cpu_traits.py b/nova/tests/functional/libvirt/test_report_cpu_traits.py
index eb984c5145..99e68b7b5c 100644
--- a/nova/tests/functional/libvirt/test_report_cpu_traits.py
+++ b/nova/tests/functional/libvirt/test_report_cpu_traits.py
@@ -190,7 +190,6 @@ class LibvirtReportNoSevTraitsTests(LibvirtReportTraitsTestBase):
class LibvirtReportSevTraitsTests(LibvirtReportTraitsTestBase):
STUB_INIT_HOST = False
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, True)
@test.patch_open(SEV_KERNEL_PARAM_FILE, "1\n")
@mock.patch.object(
fakelibvirt.virConnect, '_domain_capability_features',
@@ -198,7 +197,8 @@ class LibvirtReportSevTraitsTests(LibvirtReportTraitsTestBase):
def setUp(self):
super(LibvirtReportSevTraitsTests, self).setUp()
self.flags(num_memory_encrypted_guests=16, group='libvirt')
- self.start_compute()
+ with test.patch_exists(SEV_KERNEL_PARAM_FILE, True):
+ self.start_compute()
def test_sev_trait_on_off(self):
"""Test that the compute service reports the SEV trait in the list of
diff --git a/nova/tests/functional/libvirt/test_reshape.py b/nova/tests/functional/libvirt/test_reshape.py
index 73f64b6972..1f924739e3 100644
--- a/nova/tests/functional/libvirt/test_reshape.py
+++ b/nova/tests/functional/libvirt/test_reshape.py
@@ -30,17 +30,7 @@ LOG = logging.getLogger(__name__)
class VGPUReshapeTests(base.ServersTestBase):
- @mock.patch('nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84})
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True)
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b''),
- io.BytesIO(b'')])
- def test_create_servers_with_vgpu(
- self, mock_file_open, mock_valid_hostname, mock_get_fs_info):
+ def test_create_servers_with_vgpu(self):
"""Verify that vgpu reshape works with libvirt driver
1) create two servers with an old tree where the VGPU resource is on
@@ -49,7 +39,8 @@ class VGPUReshapeTests(base.ServersTestBase):
3) check that the allocations of the servers are still valid
4) create another server now against the new tree
"""
-
+ self.mock_file_open.side_effect = [
+ io.BytesIO(b''), io.BytesIO(b''), io.BytesIO(b'')]
# NOTE(gibi): We cannot simply ask the virt driver to create an old
# RP tree with vgpu on the root RP as that code path does not exist
# any more. So we have to hack a "bit". We will create a compute
@@ -81,11 +72,11 @@ class VGPUReshapeTests(base.ServersTestBase):
# ignore the content of the above HostMdevDeviceInfo
self.flags(enabled_mdev_types='', group='devices')
- hostname = self.start_compute(
+ self.hostname = self.start_compute(
hostname='compute1',
mdev_info=fakelibvirt.HostMdevDevicesInfo(devices=mdevs),
)
- self.compute = self.computes[hostname]
+ self.compute = self.computes[self.hostname]
# create the VGPU resource in placement manually
compute_rp_uuid = self.placement.get(
@@ -167,7 +158,7 @@ class VGPUReshapeTests(base.ServersTestBase):
allocations[compute_rp_uuid]['resources'])
# restart compute which will trigger a reshape
- self.compute = self.restart_compute_service(self.compute)
+ self.compute = self.restart_compute_service(self.hostname)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_vgpu.py b/nova/tests/functional/libvirt/test_vgpu.py
index f25ce44221..686582120a 100644
--- a/nova/tests/functional/libvirt/test_vgpu.py
+++ b/nova/tests/functional/libvirt/test_vgpu.py
@@ -49,11 +49,11 @@ class VGPUTestBase(base.ServersTestBase):
def setUp(self):
super(VGPUTestBase, self).setUp()
- self.useFixture(fixtures.MockPatch(
- 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84}))
+ libvirt_driver.LibvirtDriver._get_local_gb_info.return_value = {
+ 'total': 128,
+ 'used': 44,
+ 'free': 84,
+ }
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.create_mdev',
side_effect=self._create_mdev))
@@ -113,8 +113,8 @@ class VGPUTestBase(base.ServersTestBase):
parent=libvirt_parent)})
return uuid
- def start_compute(self, hostname):
- hostname = super().start_compute(
+ def start_compute_with_vgpu(self, hostname):
+ hostname = self.start_compute(
pci_info=fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
),
@@ -197,7 +197,7 @@ class VGPUTests(VGPUTestBase):
enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
group='devices')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def assert_vgpu_usage_for_compute(self, compute, expected):
self.assert_mdev_usage(compute, expected_amount=expected)
@@ -211,7 +211,7 @@ class VGPUTests(VGPUTestBase):
def test_resize_servers_with_vgpu(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
server = self._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor, host=self.compute1.host,
@@ -337,7 +337,7 @@ class VGPUMultipleTypesTests(VGPUTestBase):
# Prepare traits for later on
self._create_trait('CUSTOM_NVIDIA_11')
self._create_trait('CUSTOM_NVIDIA_12')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def test_create_servers_with_vgpu(self):
self._create_server(
@@ -369,13 +369,12 @@ class VGPUMultipleTypesTests(VGPUTestBase):
def test_create_servers_with_specific_type(self):
# Regenerate the PCI addresses so both pGPUs now support nvidia-12
- connection = self.computes[
- self.compute1.host].driver._host.get_connection()
- connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
multiple_gpu_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service(
+ self.compute1.host, pci_info=pci_info, keep_hypervisor_state=False)
pgpu1_rp_uuid = self._get_provider_uuid_by_name(
self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV1_PCI_ADDR)
pgpu2_rp_uuid = self._get_provider_uuid_by_name(
@@ -451,7 +450,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
group='mdev_nvidia-12')
self.flags(mdev_class='CUSTOM_NOTVGPU', group='mdev_mlx5_core')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -460,7 +459,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service('host1')
def test_create_servers_with_different_mdev_classes(self):
physdev1_rp_uuid = self._get_provider_uuid_by_name(
@@ -498,7 +497,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
def test_resize_servers_with_mlx5(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -507,7 +506,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute2 = self.restart_compute_service(self.compute2)
+ self.compute2 = self.restart_compute_service('host2')
# Use the new flavor for booting
server = self._create_server(
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index d1cad0e376..1200f80357 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -12,9 +12,11 @@
# under the License.
import fixtures
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.tests import fixtures as nova_fixtures
@@ -99,7 +101,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
- compute = self._start_compute(host=hostname)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ compute = self._start_compute(host=hostname)
# Ensure populating the existing pmems correctly.
vpmems = compute.driver._vpmems_by_name
diff --git a/nova/tests/functional/notification_sample_tests/test_compute_task.py b/nova/tests/functional/notification_sample_tests/test_compute_task.py
index 3de1c7d4e1..05d2d32fde 100644
--- a/nova/tests/functional/notification_sample_tests/test_compute_task.py
+++ b/nova/tests/functional/notification_sample_tests/test_compute_task.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova import objects
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
@@ -53,6 +56,10 @@ class TestComputeTaskNotificationSample(
},
actual=self.notifier.versioned_notifications[1])
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index b8ab0ee9ba..5a52c2dad6 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -46,18 +46,18 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.compute2 = self.start_service('compute', host='host2')
actions = [
- self._test_live_migration_rollback,
- self._test_live_migration_abort,
- self._test_live_migration_success,
- self._test_evacuate_server,
- self._test_live_migration_force_complete
+ (self._test_live_migration_rollback, 'ACTIVE'),
+ (self._test_live_migration_abort, 'ACTIVE'),
+ (self._test_live_migration_success, 'ACTIVE'),
+ (self._test_evacuate_server, 'SHUTOFF'),
+ (self._test_live_migration_force_complete, 'ACTIVE'),
]
- for action in actions:
+ for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
- self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@@ -275,6 +275,12 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
+ # In the scenario evacuate happened before which stopped the
+ # server.
+ self._start_server(server)
+ self._wait_for_state_change(server, 'ACTIVE')
+ self.notifier.reset()
+
post = {
'os-migrateLive': {
'host': 'host2',
@@ -1231,7 +1237,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.9',
+ 'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
@@ -1327,7 +1333,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.9',
+ 'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
diff --git a/nova/tests/functional/regressions/test_bug_1628606.py b/nova/tests/functional/regressions/test_bug_1628606.py
new file mode 100644
index 0000000000..0fccd78cce
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1628606.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+from unittest import mock
+
+
+class PostLiveMigrationFail(
+ test.TestCase, integrated_helpers.InstanceHelperMixin):
+ """Regression test for bug 1628606
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager'
+ '._post_live_migration_remove_source_vol_connections')
+ def test_post_live_migration(self, mock_migration):
+ server = self._create_server(networks=[])
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ error = client.OpenStackApiException(
+ "Failed to remove source vol connection post live migration")
+ mock_migration.side_effect = error
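+ # the failure happens after the guest is already running on the
+ # destination, so the instance should end up on the dest host in
+ # ERROR state rather than being rolled back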
+
+ server = self._live_migrate(
+ server, migration_expected_state='error',
+ server_expected_state='ERROR')
+
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 6180dbfbaa..b20e1530cc 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -59,7 +59,8 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Now try to evacuate the server back to the original source compute.
server = self._evacuate_server(
server, {'onSharedStorage': 'False'},
- expected_host=self.compute.host, expected_migration_status='done')
+ expected_host=self.compute.host, expected_migration_status='done',
+ expected_state='ACTIVE')
# Assert the RequestSpec.ignore_hosts field is not populated.
reqspec = objects.RequestSpec.get_by_instance_uuid(
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 9a6a79d7a2..8088ccfe06 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -13,9 +13,11 @@
# limitations under the License.
import time
+from unittest import mock
from oslo_log import log as logging
+from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -81,6 +83,10 @@ class FailedEvacuateStateTests(test.TestCase,
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(created_server, 'ACTIVE')
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_evacuate_no_valid_host(self):
# Boot a server
server = self._boot_a_server()
diff --git a/nova/tests/functional/regressions/test_bug_1732947.py b/nova/tests/functional/regressions/test_bug_1732947.py
index 3637f40bc2..db518fa8ce 100644
--- a/nova/tests/functional/regressions/test_bug_1732947.py
+++ b/nova/tests/functional/regressions/test_bug_1732947.py
@@ -28,7 +28,9 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase):
original image.
"""
api_major_version = 'v2.1'
- microversion = 'latest'
+ # We need a microversion older than 2.93 to get the old BFV rebuild
+ # behavior that was the environment for this regression.
+ microversion = '2.92'
def _setup_scheduler_service(self):
# Add the IsolatedHostsFilter to the list of enabled filters since it
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index aa86770584..59bbed4f46 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -95,7 +95,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Evacuate the instance from the source_host
server = self._evacuate_server(
- server, expected_migration_status='done')
+ server, expected_migration_status='done',
+ expected_state='ACTIVE')
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index 5e69905f5f..af134070cd 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -66,4 +66,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# higher than host3.
self._evacuate_server(
server, {'onSharedStorage': 'False'}, expected_host='host3',
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index 6663ebe8cd..3cfece8d36 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -51,14 +51,6 @@ class TestEvacuateResourceTrackerRace(
self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_mac_by_pci_address',
- return_value='52:54:00:1e:59:c6'))
-
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_vf_num_by_pci_address',
- return_value=1))
-
self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = 'latest'
self.api = self.admin_api
@@ -224,7 +216,7 @@ class TestEvacuateResourceTrackerRace(
self._run_periodics()
self._wait_for_server_parameter(
- server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
+ server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'SHUTOFF'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1902925.py b/nova/tests/functional/regressions/test_bug_1902925.py
index f0e823e2a4..59105c6cc6 100644
--- a/nova/tests/functional/regressions/test_bug_1902925.py
+++ b/nova/tests/functional/regressions/test_bug_1902925.py
@@ -28,6 +28,11 @@ class ComputeVersion5xPinnedRpcTests(integrated_helpers._IntegratedTestBase):
self.compute1 = self._start_compute(host='host1')
def _test_rebuild_instance_with_compute_rpc_pin(self, version_cap):
+ # Passing the latest microversion (>= 2.93) sends the
+ # 'reimage_boot_volume' parameter as True, which is not acceptable
+ # with the capped compute RPC version (6.1 is required), so these
+ # tests would fail. Pin the microversion to 2.92 instead.
+ self.api.microversion = '2.92'
self.flags(compute=version_cap, group='upgrade_levels')
server_req = self._build_server(networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1922053.py b/nova/tests/functional/regressions/test_bug_1922053.py
index 612be27b2b..70bb3d4cab 100644
--- a/nova/tests/functional/regressions/test_bug_1922053.py
+++ b/nova/tests/functional/regressions/test_bug_1922053.py
@@ -1,3 +1,4 @@
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -27,6 +28,7 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
ADMIN_API = True
microversion = 'latest'
+ expected_state = 'SHUTOFF'
def _create_test_server(self, compute_host):
return self._create_server(host=compute_host, networks='none')
@@ -59,7 +61,8 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
server = self._evacuate_server(
server,
expected_host='compute2',
- expected_migration_status='done'
+ expected_migration_status='done',
+ expected_state=self.expected_state
)
# Assert that the request to force up the host is rejected
@@ -97,6 +100,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""
microversion = '2.52'
+ expected_state = 'ACTIVE'
def _create_test_server(self, compute_host):
return self._create_server(az='nova:compute', networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1928063.py b/nova/tests/functional/regressions/test_bug_1928063.py
index 2c773981c5..94d7b8122c 100644
--- a/nova/tests/functional/regressions/test_bug_1928063.py
+++ b/nova/tests/functional/regressions/test_bug_1928063.py
@@ -30,7 +30,6 @@ class TestSEVInstanceReboot(base.ServersTestBase):
"""
microversion = 'latest'
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, True)
@test.patch_open(SEV_KERNEL_PARAM_FILE, "1\n")
@mock.patch.object(
fakelibvirt.virConnect, '_domain_capability_features',
@@ -40,7 +39,8 @@ class TestSEVInstanceReboot(base.ServersTestBase):
# Configure the compute to allow SEV based instances and then start
self.flags(num_memory_encrypted_guests=16, group='libvirt')
- self.start_compute()
+ with test.patch_exists(SEV_KERNEL_PARAM_FILE, True):
+ self.start_compute()
# Create a SEV enabled image for the test
sev_image = copy.deepcopy(self.glance.image1)
diff --git a/nova/tests/functional/regressions/test_bug_1944619.py b/nova/tests/functional/regressions/test_bug_1944619.py
index bdd06c493f..430a6e3981 100644
--- a/nova/tests/functional/regressions/test_bug_1944619.py
+++ b/nova/tests/functional/regressions/test_bug_1944619.py
@@ -71,6 +71,6 @@ class TestRollbackWithHWOffloadedOVS(
# Live migrate the instance to another host
self._live_migrate(self.server,
migration_expected_state='failed',
- server_expected_state='MIGRATING')
+ server_expected_state='ACTIVE')
mlpr.assert_not_called()
mlpp.assert_called_once()
diff --git a/nova/tests/functional/regressions/test_bug_1951656.py b/nova/tests/functional/regressions/test_bug_1951656.py
new file mode 100644
index 0000000000..d705ff6fe3
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1951656.py
@@ -0,0 +1,73 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_vgpu
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class VGPUTestsLibvirt7_7(test_vgpu.VGPUTestBase):
+
+ def _create_mdev(self, physical_device, mdev_type, uuid=None):
+ # We need to fake the newly created sysfs object by adding a new
+ # FakeMdevDevice in the existing persisted Connection object so
+ # when asking to get the existing mdevs, we would see it.
+ if not uuid:
+ uuid = uuidutils.generate_uuid()
+ mdev_name = libvirt_utils.mdev_uuid2name(uuid)
+ libvirt_parent = self.pci2libvirt_address(physical_device)
+
+ # Libvirt 7.7 now creates mdevs with a parent_addr suffix.
+ new_mdev_name = '_'.join([mdev_name, libvirt_parent])
+
+ # Here, we get the right compute thanks to self._current_host, which
+ # was modified just before
+ connection = self.computes[
+ self._current_host].driver._host.get_connection()
+ connection.mdev_info.devices.update(
+ {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=new_mdev_name,
+ type_id=mdev_type,
+ parent=libvirt_parent)})
+ return uuid
+
+ def setUp(self):
+ super(VGPUTestsLibvirt7_7, self).setUp()
+ extra_spec = {"resources:VGPU": "1"}
+ self.flavor = self._create_flavor(extra_spec=extra_spec)
+
+ # Start compute1 supporting only nvidia-11
+ self.flags(
+ enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+ group='devices')
+
+ self.compute1 = self.start_compute_with_vgpu('host1')
+
+ def test_create_servers_with_vgpu(self):
+
+ # Create a single instance against a specific compute node.
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=1)
+
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=2)
diff --git a/nova/tests/functional/regressions/test_bug_1978983.py b/nova/tests/functional/regressions/test_bug_1978983.py
index 75260abf37..51465900da 100644
--- a/nova/tests/functional/regressions/test_bug_1978983.py
+++ b/nova/tests/functional/regressions/test_bug_1978983.py
@@ -13,10 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-
from nova import test
from nova.tests import fixtures as nova_fixtures
-from nova.tests.functional.api import client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
@@ -44,6 +42,7 @@ class EvacuateServerWithTaskState(
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
self.src = self._start_compute(host='host1')
self.dest = self._start_compute(host='host2')
@@ -53,26 +52,20 @@ class EvacuateServerWithTaskState(
"""
server = self._create_server(networks=[])
- self.api.microversion = 'latest'
server = self._wait_for_state_change(server, 'ACTIVE')
- self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
# stop host1 compute service
self.src.stop()
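+ # mark the source compute as forced down so the evacuation is allowed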
+ self.api.put_service_force_down(self.src.service_ref.uuid, True)
# poweroff instance
self._stop_server(server, wait_for_stop=False)
server = self._wait_for_server_parameter(
server, {'OS-EXT-STS:task_state': 'powering-off'})
- # FIXME(auniyal): As compute service is down in source node
- # instance is stuck at powering-off, evacuation fails with
- # msg: Cannot 'evacuate' instance <instance-id> while it is in
- # task_state powering-off (HTTP 409)
-
- ex = self.assertRaises(
- client.OpenStackApiException,
- self._evacuate_server,
- server,
- expected_host=self.dest.host)
- self.assertEqual(409, ex.response.status_code)
+ # evacuate instance
+ server = self._evacuate_server(
+ server, expected_host=self.dest.host
+ )
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1980720.py b/nova/tests/functional/regressions/test_bug_1980720.py
new file mode 100644
index 0000000000..ad2e6e6ba2
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1980720.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2022 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+from unittest import mock
+
+
+class LibvirtDriverTests(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
+ def setUp(self):
+ super(LibvirtDriverTests, self).setUp()
+ self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
+ self.start_compute()
+
+ def _create_server_with_block_device(self):
+ server_request = self._build_server(
+ networks=[],
+ )
+ # removing imageRef is required as we want
+ # to boot from volume
+ server_request.pop('imageRef')
+ server_request['block_device_mapping_v2'] = [{
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL_QUIESCE,
+ 'destination_type': 'volume'}]
+
+ server = self.api.post_server({
+ 'server': server_request,
+ })
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server
+
+ def test_snapshot_quiesce_fail(self):
+ server = self._create_server_with_block_device()
+ with mock.patch.object(
+ nova_fixtures.libvirt.Domain, 'fsFreeze'
+ ) as mock_obj:
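+ # simulate an unresponsive QEMU guest agent so the quiesce step fails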
+ ex = nova_fixtures.libvirt.libvirtError("Error")
+ ex.err = (nova_fixtures.libvirt.VIR_ERR_AGENT_UNRESPONSIVE,)
+
+ mock_obj.side_effect = ex
+ excep = self.assertRaises(
+ client.OpenStackApiException,
+ self._snapshot_server, server, "snapshot-1"
+ )
+ self.assertEqual(409, excep.response.status_code)
diff --git a/nova/tests/functional/regressions/test_bug_1983753.py b/nova/tests/functional/regressions/test_bug_1983753.py
new file mode 100644
index 0000000000..3658d6aeb8
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1983753.py
@@ -0,0 +1,177 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import fixtures
+
+from oslo_serialization import jsonutils
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_pci_sriov_servers
+
+
+class TestPciResize(test_pci_sriov_servers._PCIServersTestBase):
+ # these tests use multiple different configs so the whitelist is set by
+ # each testcase individually
+ PCI_DEVICE_SPEC = []
+ PCI_ALIAS = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-pci-dev",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "device_type": "type-PF",
+ "name": "a-pf",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "device_type": "type-VF",
+ "name": "a-vf",
+ },
+ ]
+ ]
+
+ def setUp(self):
+ super().setUp()
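+ # stub out the disk migration step; these tests only care about the
+ # PCI claims during resize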
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.virt.libvirt.driver.LibvirtDriver.'
+ 'migrate_disk_and_power_off',
+ return_value='{}'
+ )
+ )
+ # These tests should not depend on the host's sysfs
+ self.useFixture(
+ fixtures.MockPatch('nova.pci.utils.is_physical_function'))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False)
+ )
+ )
+
+ def _test_resize_from_two_devs_to_one_dev(self, num_pci_on_dest):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI in slot 0, 1
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the config matches the PCI dev
+ compute1_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute1_device_spec)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+
+ # create a server that requests two PCI devs
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=2, free=0)
+
+ # start another compute with a different number of PCI devs available
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=num_pci_on_dest)
+ # the config matches the PCI dev
+ compute2_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute2_device_spec)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts(
+ "compute2", total=num_pci_on_dest, free=num_pci_on_dest)
+
+ # resize the server to request only one PCI dev instead of the current
+ # two. This should fit on compute2, which has at least one dev
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._resize_server(server, flavor_id=flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ self.assertPCIDeviceCounts(
+ "compute2", total=num_pci_on_dest, free=num_pci_on_dest - 1)
+
+ def test_resize_from_two_devs_to_one_dev_dest_has_two_devs(self):
+ self._test_resize_from_two_devs_to_one_dev(num_pci_on_dest=2)
+
+ def test_resize_from_two_devs_to_one_dev_dest_has_one_dev(self):
+ self._test_resize_from_two_devs_to_one_dev(num_pci_on_dest=1)
+
+ def test_resize_from_vf_to_pf(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with one VF
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=1)
+ # the config matches only the VF
+ compute1_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute1_device_spec)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+
+ # create a server that requests one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+
+ # start another compute with a single PF dev available
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with 1 VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=1)
+ # the config matches the PF dev but not the VF
+ compute2_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute2_device_spec)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=1, free=1)
+
+ # resize the server to request one PF dev instead of the current VF
+ # dev. This should fit on compute2, which has exactly one PF dev.
+ extra_spec = {"pci_passthrough:alias": "a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._resize_server(server, flavor_id=flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assertPCIDeviceCounts("compute2", total=1, free=0)
diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py
index 8dfb345578..1ffa3ada92 100644
--- a/nova/tests/functional/test_aggregates.py
+++ b/nova/tests/functional/test_aggregates.py
@@ -935,11 +935,11 @@ class TestAggregateMultiTenancyIsolationFilter(
# Start nova services.
self.start_service('conductor')
- self.admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
- self.api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1',
- project_id=uuids.non_admin)).api
+ api_fixture = self.useFixture(
+ nova_fixtures.OSAPIFixture(api_version='v2.1'))
+ self.admin_api = api_fixture.admin_api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.non_admin
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
@@ -1037,15 +1037,15 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- # Intentionally keep these separate since we want to create the
- # server with the non-admin user in a different project.
- admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ # Intentionally define different project ids for the two clients since
+ # we want to create the server with the non-admin user in a different
+ # project.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1', project_id=uuids.admin_project))
- self.admin_api = admin_api_fixture.admin_api
+ self.admin_api = api_fixture.admin_api
self.admin_api.microversion = 'latest'
- user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1', project_id=uuids.user_project))
- self.api = user_api_fixture.api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.user_project
self.api.microversion = 'latest'
self.start_service('conductor')
diff --git a/nova/tests/functional/test_boot_from_volume.py b/nova/tests/functional/test_boot_from_volume.py
index 0b963b5aa3..6396954bf4 100644
--- a/nova/tests/functional/test_boot_from_volume.py
+++ b/nova/tests/functional/test_boot_from_volume.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
from unittest import mock
from nova import context
@@ -50,6 +51,9 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
self.flags(allow_resize_to_same_host=True)
super(BootFromVolumeTest, self).setUp()
self.admin_api = self.api_fixture.admin_api
+ self.useFixture(nova_fixtures.CinderFixture(self))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeVirtAPI.wait_for_instance_event'))
def test_boot_from_volume_larger_than_local_gb(self):
# Verify no local disk is being used currently
@@ -138,6 +142,42 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
post_data = {'rebuild': {'imageRef': image_uuid}}
self.api.post_server_action(server_id, post_data)
+
+ def test_rebuild_volume_backed_larger_than_local_gb(self):
+ # Verify no local disk is being used currently
+ self._verify_zero_local_gb_used()
+
+ # Create flavors with disk larger than available host local disk
+ flavor_id = self._create_flavor(memory_mb=64, vcpu=1, disk=8192,
+ ephemeral=0)
+
+ # Boot a server with a flavor disk larger than the available local
+ # disk. It should succeed for boot from volume.
+ server = self._build_server(image_uuid='', flavor_id=flavor_id)
+ volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ bdm = {'boot_index': 0,
+ 'uuid': volume_uuid,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'}
+ server['block_device_mapping_v2'] = [bdm]
+ created_server = self.api.post_server({"server": server})
+ server_id = created_server['id']
+ self._wait_for_state_change(created_server, 'ACTIVE')
+
+ # Check that hypervisor local disk reporting is still 0
+ self._verify_zero_local_gb_used()
+ # Check that instance has not been saved with 0 root_gb
+ self._verify_instance_flavor_not_zero(server_id)
+ # Check that request spec has not been saved with 0 root_gb
+ self._verify_request_spec_flavor_not_zero(server_id)
+
+ # Rebuild
+ # The image_uuid is from CinderFixture for the
+ # volume representing IMAGE_BACKED_VOL.
+ self.api.microversion = '2.93'
+ image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+ post_data = {'rebuild': {'imageRef': image_uuid}}
+ self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
diff --git a/nova/tests/functional/test_ephemeral_encryption.py b/nova/tests/functional/test_ephemeral_encryption.py
new file mode 100644
index 0000000000..ba5e411902
--- /dev/null
+++ b/nova/tests/functional/test_ephemeral_encryption.py
@@ -0,0 +1,381 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils.fixture import uuidsentinel
+
+from nova import context
+from nova import objects
+from nova.tests.functional import integrated_helpers
+
+
+class _TestEphemeralEncryptionBase(
+ integrated_helpers.ProviderUsageBaseTestCase
+):
+ # NOTE(lyarwood): A dict of test flavors defined per test class,
+ # keyed by flavor name and providing an additional dict containing an 'id'
+ # and optional 'extra_specs' dict. For example:
+ # {
+ # 'name': {
+ # 'id': uuidsentinel.flavor_id,
+ # 'extra_specs': {
+ # 'hw:foo': 'bar'
+ # }
+ # }
+ # }
+ flavors = {}
+
+ def setUp(self):
+ super().setUp()
+
+ self.ctxt = context.get_admin_context()
+
+ # Create the required test flavors
+ for name, details in self.flavors.items():
+ flavor = self.admin_api.post_flavor({
+ 'flavor': {
+ 'name': name,
+ 'id': details['id'],
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1024,
+ }
+ })
+ # Add the optional extra_specs
+ if details.get('extra_specs'):
+ self.admin_api.post_extra_spec(
+ flavor['id'], {'extra_specs': details['extra_specs']})
+
+ # We only need a single compute for these tests
+ self._start_compute(host='compute1')
+
+ def _assert_ephemeral_encryption_enabled(
+ self, server_id, encryption_format=None):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.ctxt, server_id)
+ for bdm in bdms:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ if encryption_format:
+ self.assertEqual(
+ encryption_format, bdm.encryption_format)
+
+ def _assert_ephemeral_encryption_disabled(self, server_id):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.ctxt, server_id)
+ for bdm in bdms:
+ if bdm.is_local:
+ self.assertFalse(bdm.encrypted)
+
+
+class TestEphemeralEncryptionAvailable(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.EphEncryptionDriver'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ }
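+ # NOTE: the image_uuid sentinels used below (e.g.
+ # uuidsentinel.eph_encryption) are assumed to refer to Glance fixture
+ # images whose hw_ephemeral_encryption(_format) properties mirror the
+ # flavor extra specs above.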
+
+ def test_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_flavor_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_and_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_flavor_and_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_requested_and_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_flavor_disabled_and_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+
+class TestEphemeralEncryptionUnavailable(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.MediumFakeDriver'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ }
+
+ def test_requested_but_unavailable(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_disabled(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ flavor_id=uuidsentinel.no_eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+
+class TestEphemeralEncryptionLUKS(TestEphemeralEncryptionAvailable):
+
+ compute_driver = 'fake.EphEncryptionDriverLUKS'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ 'eph_encryption_luks': {
+ 'id': uuidsentinel.eph_encryption_luks_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }
+ },
+ 'eph_encryption_plain': {
+ 'id': uuidsentinel.eph_encryption_plain_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'plain'
+ }
+ },
+
+ }
+
+ def test_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_flavor_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_flavor_and_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_flavor_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_requested_luks_flavor_requested_plain(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_image_requested_plain_flavor_requested_luks(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+
+class TestEphemeralEncryptionPLAIN(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.EphEncryptionDriverPLAIN'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ 'eph_encryption_luks': {
+ 'id': uuidsentinel.eph_encryption_luks_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }
+ },
+ 'eph_encryption_plain': {
+ 'id': uuidsentinel.eph_encryption_plain_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'plain'
+ }
+ },
+ }
+
+ def test_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_flavor_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_flavor_and_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_flavor_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_requested_plain_flavor_requested_luks(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_image_requested_luks_flavor_requested_plain(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
diff --git a/nova/tests/functional/test_images.py b/nova/tests/functional/test_images.py
index 340e883da9..e7e9f2a6c9 100644
--- a/nova/tests/functional/test_images.py
+++ b/nova/tests/functional/test_images.py
@@ -12,7 +12,6 @@
from oslo_utils.fixture import uuidsentinel as uuids
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -70,10 +69,9 @@ class ImagesTest(integrated_helpers._IntegratedTestBase):
server = self.api.post_server({"server": server})
server = self._wait_for_state_change(server, 'ACTIVE')
- # Create an admin API fixture with a unique project ID.
- admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(
- project_id=uuids.admin_project)).admin_api
+ # Use an admin API with a unique project ID.
+ admin_api = self.api_fixture.alternative_admin_api
+ admin_api.project_id = uuids.admin_project
# Create a snapshot of the server using the admin project.
name = 'admin-created-snapshot'
diff --git a/nova/tests/functional/test_instance_actions.py b/nova/tests/functional/test_instance_actions.py
index c20b053459..060133ce93 100644
--- a/nova/tests/functional/test_instance_actions.py
+++ b/nova/tests/functional/test_instance_actions.py
@@ -59,6 +59,15 @@ class InstanceActionsTestV221(InstanceActionsTestV21):
self.assertEqual('delete', actions[0]['action'])
self.assertEqual('create', actions[1]['action'])
+ def test_get_instance_actions_shelve_deleted(self):
+ server = self._create_server()
+ self._shelve_server(server)
+ self._delete_server(server)
+ actions = self.api.get_instance_actions(server['id'])
+ self.assertEqual('delete', actions[0]['action'])
+ self.assertEqual('shelve', actions[1]['action'])
+ self.assertEqual('create', actions[2]['action'])
+
class HypervisorError(Exception):
"""This is just used to make sure the exception type is in the events."""
diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py
index ba4b729b87..a5da9f87b4 100644
--- a/nova/tests/functional/test_report_client.py
+++ b/nova/tests/functional/test_report_client.py
@@ -1363,6 +1363,17 @@ class SchedulerReportClientTests(test.TestCase):
resp = self.client._reshape(self.context, inventories, allocs)
self.assertEqual(204, resp.status_code)
+ # Trigger a generation conflict.
+ # We can do this by simply sending back the same reshape; it will not
+ # work because the previous reshape updated the generations
+ self.assertRaises(
+ exception.PlacementReshapeConflict,
+ self.client._reshape,
+ self.context,
+ inventories,
+ allocs,
+ )
+
def test_update_from_provider_tree_reshape(self):
"""Run update_from_provider_tree with reshaping."""
exp_ptree = self._set_up_provider_tree()
@@ -1519,3 +1530,44 @@ class SchedulerReportClientTests(test.TestCase):
self.context, self.compute_name)
self.assertProviderTree(orig_exp_ptree, ptree)
self.assertAllocations(orig_exp_allocs, allocs)
+
+ def test_update_from_provider_tree_reshape_conflict_retry(self):
+ exp_ptree = self._set_up_provider_tree()
+
+ ptree = self.client.get_provider_tree_and_ensure_root(
+ self.context, self.compute_uuid)
+ allocs = self.client.get_allocations_for_provider_tree(
+ self.context, self.compute_name)
+ self.assertProviderTree(exp_ptree, ptree)
+ self.assertAllocations({}, allocs)
+
+ exp_allocs = self._set_up_provider_tree_allocs()
+
+ # we prepare inventory and allocation changes to trigger a reshape
+ for rp_uuid in ptree.get_provider_uuids():
+ # Add a new resource class to the inventories
+ ptree.update_inventory(
+ rp_uuid, dict(ptree.data(rp_uuid).inventory,
+ CUSTOM_FOO={'total': 10}))
+ exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = {'total': 10}
+ for c_uuid, alloc in allocs.items():
+ for rp_uuid, res in alloc['allocations'].items():
+ res['resources']['CUSTOM_FOO'] = 1
+ exp_allocs[c_uuid]['allocations'][rp_uuid][
+ 'resources']['CUSTOM_FOO'] = 1
+
+ # As the inventory update happens in the same request as the allocation
+ # update, the allocation update will have a generation conflict.
+ # So we expect that it is signalled with an exception so that the
+ # upper layer can re-drive the reshape process with a fresh tree that
+ # now has the inventories
+ self.assertRaises(
+ exception.PlacementReshapeConflict,
+ self.client.update_from_provider_tree,
+ self.context,
+ ptree,
+ allocations=allocs,
+ )
+ # we also expect that the internal caches are cleared so that the
+ # re-drive will have a chance to load fresh data from placement
+ self.assertEqual(0, len(self.client._provider_tree.roots))
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 93c725af1b..01e3547f7e 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -65,12 +65,12 @@ class ServerGroupTestBase(test.TestCase,
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
- self.api = api_fixture.api
+ self.api = self.api_fixture.api
self.api.microversion = self.microversion
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.start_service('conductor')
@@ -103,7 +103,10 @@ class ServerGroupFakeDriver(fake.SmallFakeDriver):
"""
vcpus = 1000
- memory_mb = 8192
+ # the testcases were built with a default ram allocation ratio
+ # of 1.5 and 8192 mb of ram, so to maintain the same capacity with
+ # the new default allocation ratio of 1.0 we use 8192 * 1.5 = 12288
+ memory_mb = 12288
local_gb = 100000
@@ -175,13 +178,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
# Create an API using project 'openstack1'.
# This is a non-admin API.
- #
- # NOTE(sdague): this is actually very much *not* how this
- # fixture should be used. This actually spawns a whole
- # additional API server. Should be addressed in the future.
- api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version,
- project_id=PROJECT_ID_ALT)).api
+ api_openstack1 = self.api_fixture.alternative_api
+ api_openstack1.project_id = PROJECT_ID_ALT
api_openstack1.microversion = self.microversion
# Create a server group in project 'openstack'
@@ -446,7 +444,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
evacuated_server = self._evacuate_server(
servers[1], {'onSharedStorage': 'False'},
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -623,7 +622,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
compute3 = self.start_service('compute', host='host3')
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -802,7 +802,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@@ -872,6 +873,54 @@ class ServerGroupTestV264(ServerGroupTestV215):
self.assertEqual(2, hosts.count(host))
+class ServerGroupTestV295(ServerGroupTestV264):
+ microversion = '2.95'
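+ # Starting with microversion 2.95 an evacuated server is left stopped
+ # on the destination host, which is why the older-microversion tests
+ # above pass expected_state='ACTIVE' explicitly while these do not.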
+
+ def _evacuate_with_soft_anti_affinity_policies(self, group):
+ created_group = self.api.post_server_groups(group)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # Note(gibi): need to get the server again as the state of the instance
+ # goes to ACTIVE first then the host of the instance changes to the
+ # new host later
+ evacuated_server = self.admin_api.get_server(evacuated_server['id'])
+
+ return [evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host']]
+
+ def test_evacuate_with_anti_affinity(self):
+ created_group = self.api.post_server_groups(self.anti_affinity)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ # Start additional host to test evacuation
+ compute3 = self.start_service('compute', host='host3')
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # check that the server is evacuated
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # check that policy is kept
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host'])
+
+ compute3.kill()
+
+
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index d5b1b58e4b..43208aa812 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -20,6 +20,7 @@ import time
from unittest import mock
import zlib
+from cinderclient import exceptions as cinder_exception
from keystoneauth1 import adapter
from oslo_config import cfg
from oslo_log import log as logging
@@ -1253,9 +1254,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
def test_get_servers_detail_filters(self):
# We get the results only from the up cells, this ignoring the down
# cells if list_records_by_skipping_down_cells config option is True.
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(
search_opts={'hostname': "cell3-inst0"})
@@ -1263,9 +1262,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
self.assertEqual(self.up_cell_insts[2], servers[0]['id'])
def test_get_servers_detail_all_tenants_with_down_cells(self):
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(search_opts={'all_tenants': True})
# 4 servers from the up cells and 4 servers from the down cells
@@ -1518,15 +1515,97 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase):
'volume-backed server', str(resp))
+class ServerRebuildTestCaseV293(integrated_helpers._IntegratedTestBase):
+ api_major_version = 'v2.1'
+
+ def setUp(self):
+ super(ServerRebuildTestCaseV293, self).setUp()
+ self.cinder = nova_fixtures.CinderFixture(self)
+ self.useFixture(self.cinder)
+
+ def _bfv_server(self):
+ server_req_body = {
+ # There is no imageRef because this is boot from volume.
+ 'server': {
+ 'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
+ 'name': 'test_volume_backed_rebuild_different_image',
+ 'networks': [],
+ 'block_device_mapping_v2': [{
+ 'boot_index': 0,
+ 'uuid':
+ nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ }]
+ }
+ }
+ server = self.api.post_server(server_req_body)
+ return self._wait_for_state_change(server, 'ACTIVE')
+
+ def _test_rebuild(self, server):
+ self.api.microversion = '2.93'
+ # Now rebuild the server with a different image than was used to create
+ # our fake volume.
+ rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
+ rebuild_req_body = {'rebuild': {'imageRef': rebuild_image_ref}}
+
+ with mock.patch.object(self.compute.manager.virtapi,
+ 'wait_for_instance_event'):
+ self.api.api_post('/servers/%s/action' % server['id'],
+ rebuild_req_body,
+ check_response_status=[202])
+
+ def test_volume_backed_rebuild_root_v293(self):
+ server = self._bfv_server()
+ self._test_rebuild(server)
+
+ def test_volume_backed_rebuild_root_create_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_create',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_instance_deleted(self):
+ server = self._bfv_server()
+ error = exception.InstanceNotFound(instance_id=server['id'])
+ with mock.patch.object(self.compute.manager, '_detach_root_volume',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_delete_old_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_delete',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+
class ServersTestV280(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
def setUp(self):
super(ServersTestV280, self).setUp()
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.api = api_fixture.api
- self.admin_api = api_fixture.admin_api
+ self.api = self.api_fixture.api
+ self.admin_api = self.api_fixture.admin_api
self.api.microversion = '2.80'
self.admin_api.microversion = '2.80'
@@ -1585,9 +1664,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase):
project_id_1 = '4906260553374bf0a5d566543b320516'
project_id_2 = 'c850298c1b6b4796a8f197ac310b2469'
- new_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version, project_id=project_id_1))
- new_admin_api = new_api_fixture.admin_api
+ new_admin_api = self.api_fixture.alternative_admin_api
+ new_admin_api.project_id = project_id_1
new_admin_api.microversion = '2.80'
post = {
@@ -2182,7 +2260,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
}
server = self._evacuate_server(
- server, extra_post_args=post, expected_host=dest_hostname)
+ server, extra_post_args=post, expected_host=dest_hostname,
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2359,7 +2438,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
# stay ACTIVE and task_state will be set to None.
server = self._evacuate_server(
server, expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -5246,7 +5326,8 @@ class ServerMovingTestsWithNestedResourceRequests(
server = self._evacuate_server(
server, extra_post_args=post, expected_migration_status='error',
- expected_host=source_hostname)
+ expected_host=source_hostname,
+ expected_state='ACTIVE')
self.assertIn('Unable to move instance %s to host host2. The instance '
'has complex allocations on the source host so move '
@@ -5452,7 +5533,8 @@ class ServerMovingTestsFromFlatToNested(
self._evacuate_server(
server, extra_post_args=post, expected_host='host1',
- expected_migration_status='error')
+ expected_migration_status='error',
+ expected_state='ACTIVE')
# We expect that the evacuation will fail as force evacuate tries to
# blindly copy the source allocation to the destination but on the
diff --git a/nova/tests/functional/test_servers_provider_tree.py b/nova/tests/functional/test_servers_provider_tree.py
index f48f91dc02..da562c4f19 100644
--- a/nova/tests/functional/test_servers_provider_tree.py
+++ b/nova/tests/functional/test_servers_provider_tree.py
@@ -82,7 +82,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
},
'MEMORY_MB': {
'total': 8192,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'max_unit': 8192,
'min_unit': 1,
'reserved': 512,
@@ -90,7 +90,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
},
'VCPU': {
'total': 10,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'max_unit': 10,
'min_unit': 1,
'reserved': 0,
diff --git a/nova/tests/functional/test_servers_resource_request.py b/nova/tests/functional/test_servers_resource_request.py
index 09983e1d35..9c91af7218 100644
--- a/nova/tests/functional/test_servers_resource_request.py
+++ b/nova/tests/functional/test_servers_resource_request.py
@@ -459,7 +459,7 @@ class PortResourceRequestBasedSchedulingTestBase(
def _create_sriov_networking_rp_tree(self, hostname, compute_rp_uuid):
# Create a matching RP tree in placement for the PCI devices added to
- # the passthrough_whitelist config during setUp() and PCI devices
+ # the device_spec config during setUp() and PCI devices
# present in the FakeDriverWithPciResources virt driver.
#
# * PF1 represents the PCI device 0000:01:00, it will be mapped to
@@ -1068,7 +1068,7 @@ class PortResourceRequestBasedSchedulingTest(
def test_interface_attach_sriov_with_qos_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1115,7 +1115,7 @@ class PortResourceRequestBasedSchedulingTest(
):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1362,7 +1362,7 @@ class PortResourceRequestBasedSchedulingTest(
does not have resource request can be allocated to PF2 or PF3.
For the detailed compute host config see the FakeDriverWithPciResources
- class. For the necessary passthrough_whitelist config see the setUp of
+ class. For the necessary device_spec config see the setUp of
the PortResourceRequestBasedSchedulingTestBase class.
"""
@@ -1923,7 +1923,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_migrate_server_with_qos_port_pci_update_fail_not_reschedule(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1943,7 +1943,7 @@ class ServerMoveWithPortResourceRequestTest(
non_qos_port, qos_port, qos_sriov_port)
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name which will
+ # update_pci_request_with_placement_allocations which will
# intentionally not trigger a re-schedule even if there is host3 as an
# alternate.
self.api.post_server_action(server['id'], {'migrate': None})
@@ -2162,7 +2162,8 @@ class ServerMoveWithPortResourceRequestTest(
# simply fail and the server remains on the source host
server = self._evacuate_server(
server, expected_host='host1', expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state="ACTIVE")
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
@@ -2186,7 +2187,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_evacuate_with_qos_port_pci_update_fail(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is evacuated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2207,7 +2208,7 @@ class ServerMoveWithPortResourceRequestTest(
self.compute1_service_id, {'forced_down': 'true'})
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name
+ # update_pci_request_with_placement_allocations
server = self._evacuate_server(
server, expected_host='host1', expected_state='ERROR',
expected_task_state=None, expected_migration_status='failed')
@@ -2363,7 +2364,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_live_migrate_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is live migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2504,7 +2505,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_unshelve_offloaded_server_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is unshelved to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2537,7 +2538,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], {'unshelve': None})
# Unshelve fails on host2 due to
- # update_pci_request_spec_with_allocated_interface_name fails so the
+ # update_pci_request_with_placement_allocations fails so the
# instance goes back to shelve offloaded state
self.notifier.wait_for_versioned_notifications(
'instance.unshelve.start')
@@ -2979,6 +2980,7 @@ class ExtendedResourceRequestOldCompute(
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
+ self.api.microversion = '2.72'
@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+ # Simulate the get_local_node_uuid behavior of calling write once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Level-set test case to show that starting and re-starting without
+ # any error cases works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
diff --git a/nova/tests/unit/api/openstack/compute/test_create_backup.py b/nova/tests/unit/api/openstack/compute/test_create_backup.py
index 4a9742177e..9728002e88 100644
--- a/nova/tests/unit/api/openstack/compute/test_create_backup.py
+++ b/nova/tests/unit/api/openstack/compute/test_create_backup.py
@@ -41,10 +41,6 @@ class CreateBackupTestsV21(admin_only_action_common.CommonMixin,
self.controller = getattr(self.create_backup, self.controller_name)()
self.compute_api = self.controller.compute_api
- patch_get = mock.patch.object(self.compute_api, 'get')
- self.mock_get = patch_get.start()
- self.addCleanup(patch_get.stop)
-
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_with_metadata(self, mock_backup, mock_check_image):
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index fb7f7662d8..bd88bb8d6e 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -416,3 +416,32 @@ class EvacuateTestV268(EvacuateTestV229):
def test_forced_evacuate_with_no_host_provided(self):
# not applicable for v2.68, which removed the 'force' parameter
pass
+
+
+class EvacuateTestV295(EvacuateTestV268):
+ def setUp(self):
+ super(EvacuateTestV268, self).setUp()
+ self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
+ version='2.95')
+ self.req = fakes.HTTPRequest.blank('', version='2.95')
+ self.mock_get_min_ver = self.useFixture(fixtures.MockPatch(
+ 'nova.objects.service.get_minimum_version_all_cells',
+ return_value=62)).mock
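+ # Compute service version 62 is treated here as the minimum that
+ # supports the 2.95 evacuate flow; test_evacuate_version_error below
+ # lowers it to 61 to exercise the version check.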
+
+ def test_evacuate_version_error(self):
+ self.mock_get_min_ver.return_value = 61
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._get_evacuate_response,
+ {'host': 'my-host', 'adminPass': 'foo'})
+
+ def test_evacuate_unsupported_rpc(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.UnsupportedRPCVersion(
+ api="fakeapi",
+ required="x.xx")
+
+ self.stub_out('nova.compute.api.API.evacuate', fake_evacuate)
+ self._check_evacuate_failure(webob.exc.HTTPConflict,
+ {'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 0581a47c84..ea9ca2f632 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index 407cdf7f8e..8d1c853206 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -531,9 +531,8 @@ class MigrateServerTestsV256(MigrateServerTestsV234):
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_exception(self, exc_info, expected_result):
- @mock.patch.object(self.compute_api, 'get')
@mock.patch.object(self.compute_api, 'resize', side_effect=exc_info)
- def _test(mock_resize, mock_get):
+ def _test(mock_resize):
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(expected_result,
self.controller._migrate,
diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py
index 8c2b67d3a8..0a1bbd08d8 100644
--- a/nova/tests/unit/api/openstack/compute/test_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_quotas.py
@@ -883,7 +883,8 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
local_limit.KEY_PAIRS: 100,
local_limit.SERVER_GROUPS: 12,
local_limit.SERVER_GROUP_MEMBERS: 10}
- self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture(reglimits, {}))
@mock.patch.object(placement_limit, "get_legacy_project_limits")
def test_show_v21(self, mock_proj):
@@ -1099,7 +1100,7 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
local_limit.KEY_PAIRS: 1,
local_limit.SERVER_GROUPS: 3,
local_limit.SERVER_GROUP_MEMBERS: 2}
- self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ self.limit_fixture.reglimits = reglimits
req = fakes.HTTPRequest.blank("")
response = self.controller.defaults(req, uuids.project_id)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index bd09307567..961f4a02c9 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -104,6 +104,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
index 8d0ba37f92..fe7a60f956 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
@@ -210,7 +210,8 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota')
self.req = fakes.HTTPRequest.blank('')
self.controller = sg_v21.ServerGroupController()
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 10}, {}))
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture({'server_groups': 10}, {}))
@mock.patch('nova.limit.local.enforce_db_limit')
def test_create_server_group_during_recheck(self, mock_enforce):
@@ -237,7 +238,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
delta=1)
def test_create_group_fails_with_zero_quota(self):
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 0}, {}))
+ self.limit_fixture.reglimits = {'server_groups': 0}
sgroup = {'name': 'test', 'policies': ['anti-affinity']}
exc = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
@@ -246,7 +247,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
self.assertIn(msg, str(exc))
def test_create_only_one_group_when_limit_is_one(self):
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 1}, {}))
+ self.limit_fixture.reglimits = {'server_groups': 1}
policies = ['anti-affinity']
sgroup = {'name': 'test', 'policies': policies}
res_dict = self.controller.create(
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index 636682a6b7..9d99c3ae6d 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -87,7 +87,8 @@ class ServerGroupTestV21(test.NoDBTestCase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
- self.req = fakes.HTTPRequest.blank('')
+ self.member_req = fakes.HTTPRequest.member_req('')
+ self.reader_req = fakes.HTTPRequest.reader_req('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -114,20 +115,20 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
- req = fakes.HTTPRequest.blank('', version='2.63')
+ req = fakes.HTTPRequest.member_req('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
@@ -162,7 +163,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
- self.controller.create(self.req, body={'server_group': sgroup})
+ self.controller.create(self.member_req, body={'server_group': sgroup})
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
@@ -289,7 +290,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ reader_req = fakes.HTTPRequest.reader_req(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
@@ -298,7 +299,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertEqual(all, res_dict)
# test as non-admin
- res_dict = self.controller.index(req)
+ res_dict = self.controller.index(reader_req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@@ -347,25 +348,27 @@ class ServerGroupTestV21(test.NoDBTestCase):
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ req = fakes.HTTPRequest.reader_req(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, self.req, uuidsentinel.group)
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ self.controller.show, self.reader_req, uuidsentinel.group)
def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
+ ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID,
+ roles=['member', 'reader'])
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
@@ -379,7 +382,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
@@ -393,7 +396,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
- self.controller.show(self.req, ig_uuid)
+ self.controller.show(self.reader_req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
@@ -406,7 +409,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
@@ -414,99 +417,99 @@ class ServerGroupTestV21(test.NoDBTestCase):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=None)
+ self.controller.create, self.member_req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=body)
+ self.controller.create, self.member_req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
@@ -528,7 +531,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.index(self.admin_req)
# test as non-admin
- self.controller.index(self.req)
+ self.controller.index(self.reader_req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
@@ -598,7 +601,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
- resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
+ resp = self.controller.delete(self.member_req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
@@ -611,7 +614,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- self.req, 'invalid')
+ self.member_req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
@@ -622,7 +625,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
- self.controller.delete(self.req, ig_uuid)
+ self.controller.delete(self.member_req, ig_uuid)
class ServerGroupTestV213(ServerGroupTestV21):
@@ -649,7 +652,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
@@ -674,7 +677,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@@ -690,7 +693,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
@@ -698,7 +701,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertIn("Only anti-affinity policy supports rules", str(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# A negative test where the key is unknown, or the value is not a
# positive integer
invalid_rules = [{'unknown_key': '3'},
@@ -718,7 +721,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
@@ -734,7 +737,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
@@ -742,14 +745,14 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
@@ -771,7 +774,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_additional_params(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
@@ -786,7 +789,7 @@ class ServerGroupTestV275(ServerGroupTestV264):
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
- req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
- version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('/os-server-groups?dummy=False',
+ version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
index fc85e9c4e9..8903de0c3c 100644
--- a/nova/tests/unit/api/openstack/compute/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -2088,10 +2088,10 @@ class ServersControllerTestV216(_ServersControllerTest):
return server_dict
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def _verify_host_status_policy_behavior(self, func, mock_get_host_status):
+ def _verify_host_status_policy_behavior(self, func):
# Set policy to disallow both host_status cases and verify we don't
# call the get_instance_host_status compute RPC API.
+ self.mock_get_instance_host_status.reset_mock()
rules = {
'os_compute_api:servers:show:host_status': '!',
'os_compute_api:servers:show:host_status:unknown-only': '!',
@@ -2099,7 +2099,7 @@ class ServersControllerTestV216(_ServersControllerTest):
orig_rules = policy.get_rules()
policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False)
func()
- mock_get_host_status.assert_not_called()
+ self.mock_get_instance_host_status.assert_not_called()
# Restore the original rules.
policy.set_rules(orig_rules)
@@ -2639,15 +2639,13 @@ class ServersControllerTestV275(ControllerTest):
microversion = '2.75'
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_additional_query_param_old_version(self, mock_get):
+ def test_get_servers_additional_query_param_old_version(self):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
self.controller.index(req)
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_ignore_sort_key_old_version(self, mock_get):
+ def test_get_servers_ignore_sort_key_old_version(self):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=deleted',
use_admin_context=True, version='2.74')
@@ -3585,13 +3583,13 @@ class ServersControllerRebuildTestV263(ControllerTest):
},
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get, certs=None,
- conf_enabled=True, conf_certs=None):
+ def _rebuild_server(self, certs=None, conf_enabled=True, conf_certs=None):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
- vm_state=vm_states.ACTIVE, trusted_certs=certs,
- project_id=self.req_project_id, user_id=self.req_user_id)
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(
+ ctx, vm_state=vm_states.ACTIVE, trusted_certs=certs,
+ project_id=self.req_project_id, user_id=self.req_user_id
+ )
self.flags(default_trusted_certificate_ids=conf_certs, group='glance')
@@ -3744,10 +3742,10 @@ class ServersControllerRebuildTestV271(ControllerTest):
}
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get):
+ def _rebuild_server(self):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, project_id=self.req_project_id,
user_id=self.req_user_id)
server = self.controller._action_rebuild(
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index 660f70bfeb..5b4a2d8b1a 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -1889,8 +1889,7 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
req, '5')
def _test_assisted_delete_instance_conflict(self, api_error):
- # unset the stub on volume_snapshot_delete from setUp
- self.mock_volume_snapshot_delete.stop()
+ self.mock_volume_snapshot_delete.side_effect = api_error
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
@@ -1899,10 +1898,9 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
urllib.parse.urlencode(params),
version=self.microversion)
req.method = 'DELETE'
- with mock.patch.object(compute_api.API, 'volume_snapshot_delete',
- side_effect=api_error):
- self.assertRaises(
- webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
+
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
def test_assisted_delete_instance_invalid_state(self):
api_error = exception.InstanceInvalidState(
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 8cf90ddebe..9ac970f787 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -240,6 +240,9 @@ class HTTPRequest(os_wsgi.Request):
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
+ roles = kwargs.pop('roles', [])
+ if use_admin_context:
+ roles.append('admin')
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
@@ -247,10 +250,19 @@ class HTTPRequest(os_wsgi.Request):
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
- is_admin=use_admin_context)
+ is_admin=use_admin_context,
+ roles=roles)
out.api_version_request = api_version.APIVersionRequest(version)
return out
+ @classmethod
+ def member_req(cls, *args, **kwargs):
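+ """Return a fake request whose context has the non-admin
+ 'member' and 'reader' roles.
+ """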
+ return cls.blank(*args, roles=['member', 'reader'], **kwargs)
+
+ @classmethod
+ def reader_req(cls, *args, **kwargs):
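+ """Return a fake request whose context has only the read-only
+ 'reader' role.
+ """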
+ return cls.blank(*args, roles=['reader'], **kwargs)
+
class HTTPRequestV21(HTTPRequest):
pass
diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py
index 94e2fe5cb1..0eb7011c11 100644
--- a/nova/tests/unit/api/openstack/test_wsgi_app.py
+++ b/nova/tests/unit/api/openstack/test_wsgi_app.py
@@ -104,3 +104,18 @@ document_root = /tmp
'disable_compute_service_check_for_ffu', True,
group='workarounds')
wsgi_app._setup_service('myhost', 'api')
+
+ def test__get_config_files_empty_env(self):
+ env = {}
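+ # Without any environment overrides the default /etc/nova paths
+ # are returned.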
+ result = wsgi_app._get_config_files(env)
+ expected = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
+ self.assertEqual(result, expected)
+
+ def test__get_config_files_with_env(self):
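+ # OS_NOVA_CONFIG_DIR and OS_NOVA_CONFIG_FILES override the
+ # default config directory and file names.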
+ env = {
+ "OS_NOVA_CONFIG_DIR": "/nova",
+ "OS_NOVA_CONFIG_FILES": "api.conf",
+ }
+ result = wsgi_app._get_config_files(env)
+ expected = ['/nova/api.conf']
+ self.assertEqual(result, expected)
diff --git a/nova/tests/unit/api/validation/extra_specs/test_validators.py b/nova/tests/unit/api/validation/extra_specs/test_validators.py
index dd45f85ff1..a8911aadad 100644
--- a/nova/tests/unit/api/validation/extra_specs/test_validators.py
+++ b/nova/tests/unit/api/validation/extra_specs/test_validators.py
@@ -74,6 +74,10 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'preferred'),
('hw:pci_numa_affinity_policy', 'socket'),
('hw:cpu_policy', 'mixed'),
+ ('hw:viommu_model', 'auto'),
+ ('hw:viommu_model', 'intel'),
+ ('hw:viommu_model', 'smmuv3'),
+ ('hw:viommu_model', 'virtio'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -92,6 +96,7 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'requird'),
('hw:pci_numa_affinity_policy', 'prefrred'),
('hw:pci_numa_affinity_policy', 'socet'),
+ ('hw:viommu_model', 'autt'),
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index 60e8e32c75..29dd5610f6 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -128,20 +128,21 @@ class TestPolicyCheck(test.NoDBTestCase):
self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
- context = nova_context.RequestContext()
- rule_conditions = [base_policies.PROJECT_READER]
+ context = nova_context.RequestContext(roles=['reader'])
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
- self._check_filter_rules()
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context)
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
@@ -150,13 +151,15 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- self._check_filter_rules(target=instance)
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context, target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
- project_id='fake-project')
+ project_id='fake-project',
+ roles=['reader'])
instance = fake_instance.fake_instance_obj(db_context)
- rule_conditions = [base_policies.PROJECT_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(db_context, instance, expected_rules)
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index 4f2510438d..f5fcc168ee 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -39,7 +39,6 @@ from nova import exception
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import service
-from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -393,60 +392,6 @@ class TestUpgradeCheckCinderAPI(test.NoDBTestCase):
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
-class TestUpgradeCheckPolicy(test.NoDBTestCase):
-
- new_default_status = upgradecheck.Code.WARNING
-
- def setUp(self):
- super(TestUpgradeCheckPolicy, self).setUp()
- self.cmd = status.UpgradeCommands()
- self.rule_name = "context_is_admin"
-
- def tearDown(self):
- super(TestUpgradeCheckPolicy, self).tearDown()
- # Check if policy is reset back after the upgrade check
- self.assertIsNone(policy._ENFORCER)
-
- def test_policy_rule_with_new_defaults(self):
- new_default = "role:admin and system_scope:all"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
- self.assertEqual(self.new_default_status,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_old_defaults(self):
- new_default = "is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_both_defaults(self):
- new_default = "(role:admin and system_scope:all) or is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_checks_with_fresh_init_and_no_policy_override(self):
- self.policy = self.useFixture(nova_fixtures.OverridePolicyFixture(
- rules_in_file={}))
- policy.reset()
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
-
-class TestUpgradeCheckPolicyEnableScope(TestUpgradeCheckPolicy):
-
- new_default_status = upgradecheck.Code.SUCCESS
-
- def setUp(self):
- super(TestUpgradeCheckPolicyEnableScope, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
-
class TestUpgradeCheckOldCompute(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index 38db08d952..9d6e9ba4bd 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -967,6 +967,31 @@ class _ComputeAPIUnitTestMixIn(object):
return snapshot_id
+ def _test_delete(self, delete_type, **attrs):
+ delete_time = datetime.datetime(
+ 1955, 11, 5, 9, 30, tzinfo=iso8601.UTC)
+ timeutils.set_time_override(delete_time)
+ self.addCleanup(timeutils.clear_time_override)
+
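+ # Patch the compute RPC API methods the various delete flows may
+ # call and hand the mocks to _do_delete so each case can assert
+ # on them.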
+ with test.nested(
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'confirm_resize'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'terminate_instance'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'soft_delete_instance'),
+ ) as (
+ mock_confirm, mock_terminate, mock_soft_delete
+ ):
+ self._do_delete(
+ delete_type,
+ mock_confirm,
+ mock_terminate,
+ mock_soft_delete,
+ delete_time,
+ **attrs
+ )
+
@mock.patch.object(compute_utils,
'notify_about_instance_action')
@mock.patch.object(objects.Migration, 'get_by_instance_and_status')
@@ -986,12 +1011,13 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=[])
@mock.patch.object(objects.Instance, 'save')
- def _test_delete(self, delete_type, mock_save, mock_bdm_get, mock_elevated,
- mock_get_cn, mock_up, mock_record, mock_inst_update,
- mock_deallocate, mock_inst_meta, mock_inst_destroy,
- mock_notify_legacy, mock_get_inst,
- mock_save_im, mock_image_delete, mock_mig_get,
- mock_notify, **attrs):
+ def _do_delete(
+ self, delete_type, mock_confirm, mock_terminate, mock_soft_delete,
+ delete_time, mock_save, mock_bdm_get, mock_elevated, mock_get_cn,
+ mock_up, mock_record, mock_inst_update, mock_deallocate,
+ mock_inst_meta, mock_inst_destroy, mock_notify_legacy, mock_get_inst,
+ mock_save_im, mock_image_delete, mock_mig_get, mock_notify, **attrs
+ ):
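+ # Invoked via _test_delete, which passes the rpcapi mocks and the
+ # frozen delete_time before the decorator-injected mocks.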
expected_save_calls = [mock.call()]
expected_record_calls = []
expected_elevated_calls = []
@@ -1001,17 +1027,11 @@ class _ComputeAPIUnitTestMixIn(object):
deltas = {'instances': -1,
'cores': -inst.flavor.vcpus,
'ram': -inst.flavor.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.UTC)
- self.useFixture(utils_fixture.TimeFixture(delete_time))
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
- rpcapi = self.compute_api.compute_rpcapi
- mock_confirm = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'confirm_resize')).mock
def _reset_task_state(context, instance, migration, src_host,
cast=False):
@@ -1026,11 +1046,6 @@ class _ComputeAPIUnitTestMixIn(object):
snapshot_id = self._set_delete_shelved_part(inst,
mock_image_delete)
- mock_terminate = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock
- mock_soft_delete = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'soft_delete_instance')).mock
-
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
@@ -1239,10 +1254,12 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.notify_about_instance_usage')
@mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.compute.api.API._record_action_start')
@mock.patch('nova.compute.api.API._local_delete')
def test_delete_error_state_with_no_host(
- self, mock_local_delete, mock_service_get, _mock_notify,
- _mock_save, mock_bdm_get, mock_lookup, _mock_del_booting):
+ self, mock_local_delete, mock_record, mock_service_get,
+ _mock_notify, _mock_save, mock_bdm_get, mock_lookup,
+ _mock_del_booting):
# Instance in error state with no host should be a local delete
# for non API cells
inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
@@ -1254,6 +1271,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_terminate.assert_not_called()
mock_service_get.assert_not_called()
@@ -2074,7 +2093,8 @@ class _ComputeAPIUnitTestMixIn(object):
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if request_spec:
- fake_spec = objects.RequestSpec()
+ fake_spec = objects.RequestSpec(
+ pci_requests=objects.InstancePCIRequests(requests=[]))
if requested_destination:
cell1 = objects.CellMapping(uuid=uuids.cell1, name='cell1')
fake_spec.requested_destination = objects.Destination(
@@ -2637,9 +2657,6 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi = self.compute_api.compute_rpcapi
- mock_pause = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock
-
with mock.patch.object(rpcapi, 'pause_instance') as mock_pause:
self.compute_api.pause(self.context, instance)
@@ -3479,7 +3496,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda',
'destination_type': 'volume', 'delete_on_termination': False,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': None, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
limits_patcher = mock.patch.object(
self.compute_api.volume_api, 'get_absolute_limits',
@@ -3542,7 +3561,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': None,
'device_name': '/dev/vdh',
'destination_type': 'local', 'delete_on_termination': True,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': False, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
quiesced = [False, False]
@@ -3987,6 +4008,158 @@ class _ComputeAPIUnitTestMixIn(object):
_checks_for_create_and_rebuild.assert_called_once_with(
self.context, None, image, flavor, {}, [], None)
+ @ddt.data(True, False)
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed(self, reimage_boot_vol,
+ _record_action_start, _checks_for_create_and_rebuild,
+ _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where the instance is volume backed and we rebuild
+ with the following cases:
+
+ 1) reimage_boot_volume=True
+ 2) reimage_boot_volume=False
+
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ flavor = instance.get_flavor()
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm), \
+ mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ if reimage_boot_vol:
+ self.compute_api.rebuild(self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=True)
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ image_ref=uuids.image_ref,
+ orig_image_ref=None, orig_sys_metadata={},
+ injected_files=[], bdms=bdms,
+ preserve_ephemeral=False, host=None,
+ request_spec=fake_spec,
+ reimage_boot_volume=True,
+ target_state=None)
+ _check_auto_disk_config.assert_called_once_with(
+ image=image, auto_disk_config=None)
+ _checks_for_create_and_rebuild.assert_called_once_with(
+ self.context, None, image, flavor, {}, [], root_bdm)
+ mock_get_bdms.assert_called_once_with(
+ self.context, instance.uuid)
+ else:
+ self.assertRaises(
+ exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False,
+ target_state=None)
+
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed_fails(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where we don't pass parameters to rebuild
+ boot volume
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm):
+ self.assertRaises(exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False,
+ target_state=None)
+
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@@ -4035,7 +4208,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4108,7 +4282,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4176,7 +4351,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4235,7 +4411,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4299,7 +4476,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -5624,7 +5802,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5676,6 +5857,7 @@ class _ComputeAPIUnitTestMixIn(object):
# Assert that the instance task state was set in the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5684,7 +5866,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5692,6 +5875,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5729,6 +5918,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+ # Assert that the instance task state was set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+ # Assert that any attempt to rescue a bfv instance with a rescue
+ # image that does not set the hw_rescue_device and hw_rescue_bus
+ # properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
@@ -5913,6 +6220,41 @@ class _ComputeAPIUnitTestMixIn(object):
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
+ def test_update_ephemeral_encryption_bdms(self):
+ flavor = self._create_flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': True,
+ 'hw:ephemeral_encryption_format': 'luks',
+ }
+ )
+ block_device_mapping = [
+ {'device_name': '/dev/sda1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': uuids.snapshot_id,
+ 'delete_on_termination': False,
+ 'boot_index': 0},
+ {'device_name': '/dev/sdb2',
+ 'source_type': 'image', 'destination_type': 'local',
+ 'image_id': uuids.image_id, 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': 'ext3', 'delete_on_termination': False}]
+
+ block_device_mapping = (
+ block_device_obj.block_device_make_list_from_dicts(
+ self.context,
+ map(fake_block_device.AnonFakeDbBlockDeviceDict,
+ block_device_mapping)))
+
+ self.compute_api._update_ephemeral_encryption_bdms(
+ flavor, {}, block_device_mapping)
+
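+ # Only the local disks should be flagged for ephemeral
+ # encryption; the volume-backed mapping is left untouched.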
+ for bdm in block_device_mapping:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ else:
+ self.assertFalse(bdm.encrypted)
+
def test_bdm_validate_set_size_and_instance(self):
swap_size = 42
ephemeral_size = 24
@@ -7704,8 +8046,9 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.Instance, 'destroy')
+ @mock.patch('nova.compute.api.API._record_action_start')
def _test_delete_volume_backed_instance(
- self, vm_state, mock_instance_destroy, bdm_destroy,
+ self, vm_state, mock_record, mock_instance_destroy, bdm_destroy,
notify_about_instance_usage, mock_save, mock_elevated,
bdm_get_by_instance_uuid, mock_lookup, _mock_del_booting,
notify_about_instance_action):
@@ -7734,6 +8077,8 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
'detach') as mock_detach:
self.compute_api.delete(self.context, inst)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_deallocate.assert_called_once_with(self.context, inst)
mock_detach.assert_called_once_with(self.context, volume_id,
inst.uuid)
@@ -7751,16 +8096,13 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertTrue(hasattr(self.compute_api, 'host'))
self.assertEqual(CONF.host, self.compute_api.host)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per API class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.compute_api._placementclient)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.compute_api.placementclient
+ self.assertFalse(mock_report_client.called)
+ self.compute_api.placementclient
mock_report_client.assert_called_once_with()
def test_validate_host_for_cold_migrate_same_host_fails(self):
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 9ef3999441..dcdef56fbe 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -169,7 +169,8 @@ class ClaimTestCase(test.NoDBTestCase):
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
@@ -181,7 +182,8 @@ class ClaimTestCase(test.NoDBTestCase):
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index ded8f0c877..49cf15ec17 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -1389,13 +1389,14 @@ class ComputeVolumeTestCase(BaseTestCase):
@mock.patch.object(nova.virt.block_device, 'convert_snapshots')
@mock.patch.object(nova.virt.block_device, 'convert_volumes')
@mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_local_images')
@mock.patch.object(nova.virt.block_device, 'convert_swap')
@mock.patch.object(nova.virt.block_device, 'attach_block_devices')
def test_prep_block_device_with_blanks(self, attach_block_devices,
- convert_swap, convert_ephemerals,
- convert_volumes, convert_snapshots,
- convert_images, convert_blanks,
- get_swap):
+ convert_swap, convert_local_images,
+ convert_ephemerals, convert_volumes,
+ convert_snapshots, convert_images,
+ convert_blanks, get_swap):
instance = self._create_fake_instance_obj()
instance['root_device_name'] = '/dev/vda'
root_volume = objects.BlockDeviceMapping(
@@ -1426,6 +1427,7 @@ class ComputeVolumeTestCase(BaseTestCase):
return bdm
convert_swap.return_value = []
+ convert_local_images.return_value = []
convert_ephemerals.return_value = []
convert_volumes.return_value = [blank_volume1, blank_volume2]
convert_snapshots.return_value = []
@@ -1438,6 +1440,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'root_device_name': '/dev/vda',
'swap': [],
'ephemerals': [],
+ 'image': [],
'block_device_mapping': bdms
}
@@ -1452,6 +1455,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertIsNotNone(bdm.device_name)
convert_swap.assert_called_once_with(bdms)
+ convert_local_images.assert_called_once_with(bdms)
convert_ephemerals.assert_called_once_with(bdms)
bdm_args = tuple(bdms)
convert_volumes.assert_called_once_with(bdm_args)
@@ -2726,7 +2730,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2756,7 +2761,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2808,7 +2814,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
- on_shared_storage=False, request_spec=None, accel_uuids=[])
+ on_shared_storage=False, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2827,7 +2834,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2848,7 +2856,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2881,7 +2889,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=injected_files, new_pass="new_password",
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -3212,6 +3221,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3240,6 +3250,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3273,7 +3284,11 @@ class ComputeTestCase(BaseTestCase,
'delete_on_termination': True,
'guest_format': None,
'volume_size': 2,
- 'boot_index': -1
+ 'boot_index': -1,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
})
swap = fake_block_device.FakeDbBlockDeviceDict({
'id': 3,
@@ -3308,16 +3323,25 @@ class ComputeTestCase(BaseTestCase,
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 1
+ 'size': 1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
},
{
'device_name': '/dev/vdc',
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 2
+ 'size': 2,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
}
],
+ 'image': [],
'block_device_mapping': [],
'root_device_name': None
}
@@ -4593,7 +4617,9 @@ class ComputeTestCase(BaseTestCase,
'limits': {},
'request_spec': None,
'on_shared_storage': False,
- 'accel_uuids': ()}),
+ 'accel_uuids': (),
+ 'reimage_boot_volume': False,
+ 'target_state': None}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5111,7 +5137,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=[], new_pass=password,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
inst_ref.refresh()
@@ -6108,7 +6135,7 @@ class ComputeTestCase(BaseTestCase,
mock_pre.assert_called_once_with(
test.MatchType(nova.context.RequestContext),
test.MatchType(objects.Instance),
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
mock.ANY, mock.ANY, mock.ANY)
@@ -6474,7 +6501,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(2, mock_notify.call_count)
post_live_migration.assert_has_calls([
mock.call(c, instance, {'swap': None, 'ephemerals': [],
- 'root_device_name': None,
+ 'image': [], 'root_device_name': None,
'block_device_mapping': []},
migrate_data)])
migrate_instance_start.assert_has_calls([
@@ -6705,7 +6732,7 @@ class ComputeTestCase(BaseTestCase,
mock_setup.assert_called_once_with(c, instance, self.compute.host,
teardown=True)
mock_rollback.assert_called_once_with(c, instance, [],
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
destroy_disks=True, migrate_data=None)
@@ -8134,7 +8161,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
self.assertEqual('/dev/vda', instance.root_device_name)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.BlockDeviceMapping, 'save')
@@ -8148,7 +8175,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.Instance, 'save')
@@ -8170,7 +8197,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
mock_default_dev.assert_called_once_with(instance, mock.ANY, bdms[0])
mock_default_name.assert_called_once_with(instance, '/dev/vda', [], [],
- [bdm for bdm in bdms])
+ [], [bdm for bdm in bdms])
def test_default_block_device_names_with_blank_volumes(self):
instance = self._create_fake_instance_obj()
@@ -8230,7 +8257,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
self.assertTrue(object_save.called)
default_device_names.assert_called_once_with(instance,
- '/dev/vda', [bdms[-2]], [bdms[-1]],
+ '/dev/vda', [], [bdms[-2]], [bdms[-1]],
[bdm for bdm in bdms[:-2]])
def test_reserve_block_device_name(self):
@@ -10736,8 +10763,13 @@ class ComputeAPITestCase(BaseTestCase):
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
- mock.patch.object(self.compute.rt, 'claim_pci_devices',
- return_value=[]),
+ mock.patch.object(
+ self.compute.rt,
+ 'claim_pci_devices',
+ side_effect=exception.PciDeviceRequestFailed(
+ requests=instance.pci_requests
+ )
+ ),
mock.patch.object(
self.compute, '_allocate_port_resource_for_instance'),
mock.patch(
@@ -10813,7 +10845,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
mock_update_pci
@@ -10883,7 +10915,7 @@ class ComputeAPITestCase(BaseTestCase):
new=mock.NonCallableMock()),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10928,7 +10960,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10995,7 +11027,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
'remove_resources_from_instance_allocation'),
@@ -11560,12 +11592,60 @@ class ComputeAPITestCase(BaseTestCase):
instance.uuid, None)
@mock.patch.object(context.RequestContext, 'elevated')
+ @mock.patch.object(cinder.API, 'detach')
+ @mock.patch.object(cinder.API, 'terminate_connection')
+ @mock.patch.object(compute_manager.ComputeManager,
+ '_get_instance_block_device_info')
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_legacy_volume_detach(
+ self, mock_get_connector, mock_info, mock_terminate, mock_detach,
+ mock_elevated,
+ ):
+ # test _shutdown_instance with legacy BDMs without a volume
+ # attachment ID
+ admin = context.get_admin_context()
+ mock_elevated.return_value = admin
+ instance = self._create_fake_instance_obj()
+ connector = 'fake-connector'
+ mock_get_connector.return_value = connector
+
+ vol_a_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_a_id,
+ attachment_id=None)
+ vol_b_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_b_id,
+ attachment_id=None)
+ bdms = [vol_a_bdm, vol_b_bdm]
+
+ self.compute._shutdown_instance(admin, instance, bdms)
+
+        # we should only get the connector once, regardless of the number of
+ # volumes
+ mock_get_connector.assert_called_once_with(instance)
+ # but we should have separate terminate and detach calls
+ mock_terminate.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, connector),
+ mock.call(admin, uuids.volume_b_id, connector),
+ ])
+ mock_detach.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, instance.uuid),
+ mock.call(admin, uuids.volume_b_id, instance.uuid),
+ ])
+
+ @mock.patch.object(context.RequestContext, 'elevated')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_delete(self, mock_info,
- mock_attach_delete,
- mock_elevated):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_delete(
+ self, mock_get_connector, mock_info, mock_attach_delete, mock_elevated,
+ ):
# test _shutdown_instance with volume bdm containing an
# attachment id. This should use the v3 cinder api.
admin = context.get_admin_context()
@@ -11585,14 +11665,18 @@ class ComputeAPITestCase(BaseTestCase):
self.compute._shutdown_instance(admin, instance, bdms)
mock_attach_delete.assert_called_once_with(admin, attachment_id)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
@mock.patch.object(compute_manager.LOG, 'debug')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_not_found(self, mock_info,
- mock_attach_delete,
- mock_debug_log):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_not_found(
+ self, mock_get_connector, mock_info, mock_attach_delete,
+ mock_debug_log,
+ ):
# test _shutdown_instance with attachment_delete throwing
# a VolumeAttachmentNotFound exception. This should not
# cause _shutdown_instance to fail. Only a debug log
@@ -11618,6 +11702,8 @@ class ComputeAPITestCase(BaseTestCase):
# get last call to LOG.debug and verify correct exception is in there
self.assertIsInstance(mock_debug_log.call_args[0][1],
exception.VolumeAttachmentNotFound)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
@@ -11877,7 +11963,7 @@ class ComputeAPITestCase(BaseTestCase):
force=False)
@mock.patch('nova.compute.utils.notify_about_instance_action')
- def _test_evacuate(self, mock_notify, force=None):
+ def _test_evacuate(self, mock_notify, force=None, target_state=None):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
@@ -11914,7 +12000,8 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host',
on_shared_storage=True,
admin_password=None,
- force=force)
+ force=force,
+ target_state=target_state)
if force is False:
host = None
else:
@@ -11931,7 +12018,8 @@ class ComputeAPITestCase(BaseTestCase):
recreate=True,
on_shared_storage=True,
request_spec=fake_spec,
- host=host)
+ host=host,
+ target_state=target_state)
do_test()
instance.refresh()
@@ -11963,6 +12051,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_evacuate_with_forced_host(self):
self._test_evacuate(force=True)
+ def test_evacuate_with_target_state(self):
+ self._test_evacuate(target_state="stopped")
+
@mock.patch('nova.servicegroup.api.API.service_is_up',
return_value=False)
def test_fail_evacuate_with_non_existing_destination(self, _service_is_up):
@@ -13037,16 +13128,13 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = aggregate.hosts if 'hosts' in aggregate else None
self.assertIn(values[0][1][0], hosts)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per AggregateAPI class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.api._placement_client)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.api.placement_client
+ self.assertFalse(mock_report_client.called)
+ self.api.placement_client
mock_report_client.assert_called_once_with()
@@ -13439,7 +13527,7 @@ class EvacuateHostTestCase(BaseTestCase):
return_value=mock.sentinel.mapping)
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
@mock.patch.object(network_api, 'setup_networks_on_host')
@@ -13459,7 +13547,8 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
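
The _shutdown_instance volume-detach tests in this file encode a simple
branching rule: legacy volume BDMs (no attachment_id) need the host connector
fetched once plus a terminate_connection/detach pair per volume, while cinder
v3-style attachments only need attachment_delete and must never request a
connector. A rough sketch of that rule, with hypothetical helper and
parameter names, assuming objects shaped like the mocks in these tests:

    def detach_volumes_on_shutdown(context, instance, bdms, volume_api,
                                   driver):
        connector = None
        for bdm in bdms:
            if bdm.attachment_id:
                # Cinder v3-style attachment: no host connector involved.
                volume_api.attachment_delete(context, bdm.attachment_id)
            else:
                # Legacy attachment: fetch the connector once and reuse it
                # for every remaining legacy volume.
                if connector is None:
                    connector = driver.get_volume_connector(instance)
                volume_api.terminate_connection(
                    context, bdm.volume_id, connector)
                volume_api.detach(context, bdm.volume_id, instance.uuid)
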
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index d56f12fecb..1c69cd8f1c 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -57,6 +57,7 @@ from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
@@ -76,6 +77,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.virt import node as virt_node
from nova.volume import cinder
@@ -89,6 +91,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
# os-brick>=5.1 now uses external file system locks instead of internal
# locks so we need to set up locking
REQUIRES_LOCKING = True
+ STUB_COMPUTE_ID = False
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
@@ -348,6 +351,46 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, mock.sentinel.node, startup=True)
log_mock.exception.assert_called_once()
+ def test_update_available_resource_for_node_pci_placement_failed_startup(
+ self
+ ):
+ """If the PCI placement translation failed during startup then the
+ exception is raised up to kill the service
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.assertRaises(
+ exception.PlacementPciException,
+ self.compute._update_available_resource_for_node,
+ self.context,
+ mock.sentinel.node,
+ startup=True,
+ )
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=True)
+
+ @mock.patch('nova.compute.manager.LOG')
+ def test_update_available_resource_for_node_pci_placement_failed_later(
+ self, mock_log
+ ):
+ """If the PCI placement translation failed later (not at startup)
+ during a periodic then the exception is just logged
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.compute._update_available_resource_for_node(
+ self.context, mock.sentinel.node, startup=False)
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=False)
+ mock_log.exception.assert_called_once_with(
+ 'Error updating PCI resources for node %(node)s.',
+ {'node': mock.sentinel.node}
+ )
+
@mock.patch.object(manager, 'LOG')
@mock.patch.object(manager.ComputeManager,
'_update_available_resource_for_node')
@@ -866,6 +909,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
+ @mock.patch.object(manager.ComputeManager,
+ '_ensure_existing_node_identity')
@mock.patch.object(manager.ComputeManager, '_get_nodes')
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@@ -884,17 +929,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm, mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_init_host,
- mock_error_interrupted, mock_get_nodes):
+ mock_error_interrupted, mock_get_nodes,
+ mock_existing_node):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
+ mock_existing_node.assert_not_called()
mock_validate_pinning.assert_called_once_with(inst_list)
mock_validate_vtpm.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
@@ -937,8 +984,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"""
mock_get_nodes.return_value = {
uuids.cn_uuid1: objects.ComputeNode(
- uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
- self.compute.init_host()
+ uuid=uuids.cn_uuid1, hypervisor_hostname='node1',
+ host=self.compute.host)}
+ self.compute.init_host(None)
mock_error_interrupted.assert_called_once_with(
test.MatchType(nova.context.RequestContext), set(),
@@ -948,16 +996,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
- def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
+ def test_cleanup_host(self, mock_cnlist_get, mock_miglist_get,
+ mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
+ mock_cnlist_get.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
+ self.compute.init_host(None)
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
@@ -1046,7 +1097,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'remove_provider_tree_from_instance_allocation')
) as (mock_get_net, mock_remove_allocation):
- self.compute.init_host()
+ self.compute.init_host(None)
mock_remove_allocation.assert_called_once_with(
self.context, deleted_instance.uuid, uuids.our_node_uuid)
@@ -1099,11 +1150,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuids.evac_instance: evacuating_instance
}
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
mock_init_instance.assert_called_once_with(
self.context, active_instance)
@@ -1111,23 +1162,49 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {active_instance.uuid, evacuating_instance.uuid},
mock_get_nodes.return_value.keys())
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
- def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn1 = objects.ComputeNode(uuid=uuids.cn1)
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [cn1, cn2]
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
+ # NOTE(danms): The fake driver, by default, uses
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
+ # return here.
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
+ mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn1: cn1, uuids.cn2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_by_uuid.assert_called_once_with(self.context,
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # Possible hostname (as reported by the virt driver) rename,
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
@@ -1149,37 +1226,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
- self, mock_driver_get_nodes, mock_get_by_host_and_node,
+ self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [
- exception.ComputeHostNotFound(host='fake-node1'), cn2]
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
+ mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn2: cn2}, nodes)
+ self.assertEqual({}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_all_by_uuids.assert_called_once_with(self.context,
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
- "Compute node %s not found in the database. If this is the first "
- "time this service is starting on this host, then you can ignore "
- "this warning.", 'fake-node1')
+ "Compute nodes %s for host %s were not found in the database. "
+ "If this is the first time this service is starting on this host, "
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(self,
@@ -1190,13 +1265,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_pinning.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(self,
@@ -1207,7 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -1310,6 +1387,36 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(instance)
+ def test_init_instance_vif_plug_fails_missing_pci(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid=uuids.instance,
+ info_cache=None,
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ host=self.compute.host,
+ expected_attrs=['info_cache'])
+
+ with test.nested(
+ mock.patch.object(context, 'get_admin_context',
+ return_value=self.context),
+ mock.patch.object(objects.Instance, 'get_network_info',
+ return_value=network_model.NetworkInfo()),
+ mock.patch.object(self.compute.driver, 'plug_vifs',
+ side_effect=exception.PciDeviceNotFoundById("pci-addr")),
+ mock.patch("nova.compute.manager.LOG.exception"),
+ ) as (get_admin_context, get_nw_info, plug_vifs, log_exception):
+ # as this does not raise, we are sure that the compute service
+ # continues initializing the rest of the instances
+ self.compute._init_instance(self.context, instance)
+ log_exception.assert_called_once_with(
+ "Virtual interface plugging failed for instance. Probably the "
+ "vnic_type of the bound port has been changed. Nova does not "
+ "support such change.",
+ instance=instance
+ )
+
def _test__validate_pinning_configuration(self, supports_pcpus=True):
instance_1 = fake_instance.fake_instance_obj(
self.context, uuid=uuids.instance_1)
@@ -5064,15 +5171,18 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
msg = mock_log.warning.call_args_list[0]
self.assertIn('appears to not be owned by this host', msg[0][0])
- def test_init_host_pci_passthrough_whitelist_validation_failure(self):
- # Tests that we fail init_host if there is a pci.passthrough_whitelist
+ def test_init_host_pci_device_spec_validation_failure(self):
+ # Tests that we fail init_host if there is a pci.device_spec
# configured incorrectly.
- self.flags(passthrough_whitelist=[
- # it's invalid to specify both in the same devspec
- jsonutils.dumps({'address': 'foo', 'devname': 'bar'})],
- group='pci')
+ self.flags(
+ device_spec=[
+ # it's invalid to specify both in the same devspec
+ jsonutils.dumps({'address': 'foo', 'devname': 'bar'})
+ ],
+ group='pci'
+ )
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
@@ -5262,7 +5372,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [])
+ recreate, False, False, None, scheduled_node, {}, None, [], False,
+ None)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5373,7 +5484,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
None, recreate=True, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
- limits={}, request_spec=request_spec, accel_uuids=[])
+ limits={}, request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False,
+ target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5412,7 +5525,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance, None, None, None, None, None, None,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
- request_spec=request_spec, accel_uuids=[])
+ request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5438,7 +5552,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [])
+ False, False, migration, None, {}, None, [], False,
+ None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5460,7 +5575,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [])
+ None, [], False, None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5542,7 +5657,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate, on_shared_storage,
preserve_ephemeral, {}, {},
self.allocations,
- mock.sentinel.mapping, [])
+ mock.sentinel.mapping, [],
+ False, None)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5560,8 +5676,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
provider_mappings=mock.sentinel.mapping)
mock_get_nw_info.assert_called_once_with(self.context, instance)
- def test_rebuild_default_impl(self):
- def _detach(context, bdms):
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test_rebuild_default_impl(self, is_vol_backed, reimage_boot_vol):
+ fake_image_meta = mock.MagicMock(id='fake_id')
+
+ def _detach(context, bdms, detach_root_bdm=True):
# NOTE(rpodolyaka): check that instance has been powered off by
# the time we detach block devices, exact calls arguments will be
# checked below
@@ -5587,13 +5707,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute, '_power_off_instance',
return_value=None),
mock.patch.object(self.compute, '_get_accel_info',
- return_value=[])
+ return_value=[]),
+ mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ return_value=is_vol_backed),
+ mock.patch.object(self.compute, '_rebuild_volume_backed_instance'),
+ mock.patch.object(compute_utils, 'get_root_bdm')
) as(
mock_destroy,
mock_spawn,
mock_save,
mock_power_off,
- mock_accel_info
+ mock_accel_info,
+ mock_is_volume_backed,
+ mock_rebuild_vol_backed_inst,
+ mock_get_root,
):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = None
@@ -5603,9 +5730,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.device_metadata = None
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
+ fake_block_device_info = {
+ 'block_device_mapping': [
+ {'attachment_id': '341a8917-f74d-4473-8ee7-4ca05e5e0ab3',
+ 'volume_id': 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'target_discovered': False,
+ 'target_portal': '127.0.0.1:3260',
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-'
+ 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'target_lun': 0}}}]}
self.compute._rebuild_default_impl(self.context,
instance,
- None,
+ fake_image_meta,
[],
admin_password='new_pass',
bdms=[],
@@ -5614,16 +5751,151 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
attach_block_devices=_attach,
network_info=None,
evacuate=False,
- block_device_info=None,
- preserve_ephemeral=False)
+ block_device_info=
+ fake_block_device_info,
+ preserve_ephemeral=False,
+ reimage_boot_volume=
+ reimage_boot_vol)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
- network_info=None, block_device_info=None)
+ network_info=None, block_device_info=fake_block_device_info)
mock_power_off.assert_called_once_with(
instance, clean_shutdown=True)
+ if is_vol_backed and reimage_boot_vol:
+ mock_rebuild_vol_backed_inst.assert_called_once_with(
+ self.context, instance, [], fake_image_meta.id)
+ else:
+ mock_rebuild_vol_backed_inst.assert_not_called()
+
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+ events = [('volume-reimaged', root_bdm.volume_id)]
+ image_size_gb = 1
+ deadline = CONF.reimage_timeout_per_gb * image_size_gb
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as (
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ # 1024 ** 3 = 1073741824
+ mock_get_img.return_value = {'size': 1073741824}
+ self.compute._rebuild_volume_backed_instance(
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_vol_api.reimage_volume.assert_called_once_with(
+ self.context, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+ mock_get_root_bdm.assert_called_once_with(
+ self.context, instance, bdms)
+ wait_inst_event.assert_called_once_with(
+ instance, events, deadline=deadline,
+ error_callback=self.compute._reimage_failed_callback)
+
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance_image_not_found(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as(
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ mock_get_img.side_effect = exception.ImageNotFound(
+ image_id=uuids.image_id)
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ mock_get_img.return_value = {'size': 1}
+ self.assertRaises(
+ exception.BuildAbortException,
+ self.compute._rebuild_volume_backed_instance,
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+
+ @mock.patch.object(objects.Instance, 'save', return_value=None)
+ @mock.patch.object(fake_driver.SmallFakeDriver, 'detach_volume')
+ @mock.patch.object(cinder.API, 'roll_detaching')
+ def test__detach_root_volume(self, mock_roll_detach, mock_detach,
+ mock_save):
+ exception_list = [
+ '',
+ exception.DiskNotFound(location="not\\here"),
+ exception.DeviceDetachFailed(device="fake_dev", reason="unknown"),
+ ]
+ mock_detach.side_effect = exception_list
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.assertRaises(exception.DeviceDetachFailed,
+ self.compute._detach_root_volume,
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
+ self.assertRaises(Exception, self.compute._detach_root_volume, # noqa
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
def test_do_rebuild_instance_check_trusted_certs(self):
"""Tests the scenario that we're rebuilding an instance with
@@ -5645,7 +5917,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -6089,6 +6361,171 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual({'one-image': 'cached',
'two-image': 'existing'}, r)
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
+ # Make sure an up-to-date service bypasses the persistence
+ service_ref = service_obj.Service()
+ self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
+ # Make sure an old service for ironic does not write a local node uuid
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_preprovisioned(self,
+ mock_read_node,
+ mock_write_node):
+ # Make sure an old service does not write a uuid if one is present
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = str(uuids.SOME_UUID)
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_no_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+        # If we are not a new service, have no pre-provisioned node uuid,
+        # and find no nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = []
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_multi_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+        # If we are not a new service, have no pre-provisioned node uuid,
+        # and find multiple nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [1, 2]
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_writes_node_uuid(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, there is no pre-provisioned local
+ # compute node uuid, and we find exactly one compute node in the
+ # database for our host, we persist that.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [
+ objects.ComputeNode(uuid=str(uuids.compute)),
+ ]
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_called_once_with(str(uuids.compute))
+
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
+ def test_ensure_node_uuid_called_by_init_host(self):
+ # test_init_host() above ensures that we do not call
+ # _ensure_existing_node_identity() in the service_ref=None case.
+ # Since testing init_host() requires a billion mocks, this
+        # tests that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_ensure_existing_node_identity') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host,
+ mock.sentinel.service_ref)
+ m.assert_called_once_with(mock.sentinel.service_ref)
+
+ def test_check_for_host_rename_ironic(self):
+ self.flags(compute_driver='ironic')
+        # The ironic virt driver makes us take the early exit, so a node
+        # record owned by another host does not abort startup
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.compute._check_for_host_rename(nodes)
+
+ def test_check_for_host_rename_renamed_only(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_renamed_one(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host),
+ uuids.node2: mock.MagicMock(uuid=uuids.node2,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_not_renamed(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host)}
+ with mock.patch.object(manager.LOG, 'debug') as mock_debug:
+ self.compute._check_for_host_rename(nodes)
+ mock_debug.assert_called_once_with(
+ 'Verified node %s matches my host %s',
+ uuids.node1, self.compute.host)
+
+ @mock.patch('nova.compute.manager.ComputeManager._get_nodes')
+ def test_check_for_host_rename_called_by_init_host(self, mock_nodes):
+ # Since testing init_host() requires a billion mocks, this
+        # tests that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_check_for_host_rename') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host, None)
+ m.assert_called_once_with(mock_nodes.return_value)
+
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -7695,6 +8132,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
@@ -8255,11 +8728,17 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
# resource request and therefore no matching request group exists in
# the request spec.
self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(),
objects.InstancePCIRequest(
+ request_id=uuids.req0,
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
requester_id=uuids.port1,
spec=[{'vendor_id': '1377', 'product_id': '0047'}]),
- objects.InstancePCIRequest(requester_id=uuids.port2),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ requester_id=uuids.port2,
+ ),
])
with test.nested(
mock.patch.object(self.compute.driver, 'spawn'),
@@ -8304,8 +8783,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = None
@@ -8327,8 +8811,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = {
@@ -8352,8 +8841,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1, uuids.rp2])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
self.assertRaises(
exception.BuildAbortException,
@@ -9983,6 +10477,27 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.instance,
migration)
+ def test_post_live_migration_update_host(self):
+ @mock.patch.object(self.compute, '_get_compute_info')
+ def _test_post_live_migration(_get_compute_info):
+ dest_host = 'dest'
+ cn = objects.ComputeNode(hypervisor_hostname=dest_host)
+ _get_compute_info.return_value = cn
+ instance = fake_instance.fake_instance_obj(self.context,
+ node='src',
+ uuid=uuids.instance)
+ with mock.patch.object(self.compute, "_post_live_migration"
+ ) as plm, mock.patch.object(instance, "save") as save:
+ error = ValueError("some failure")
+ plm.side_effect = error
+ self.assertRaises(
+ ValueError, self.compute._post_live_migration_update_host,
+ self.context, instance, dest_host)
+ save.assert_called_once()
+ self.assertEqual(instance.host, dest_host)
+
+ _test_post_live_migration()
+
def test_post_live_migration_cinder_pre_344_api(self):
# Because live migration has
# succeeded,_post_live_migration_remove_source_vol_connections()
@@ -10814,7 +11329,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -10848,7 +11363,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
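
The test_ensure_node_uuid_* cases in this file pin down the node-identity
upgrade rules for init_host(): new-enough services must already have a local
node uuid (unless the driver is ironic), old services skip persistence when
they are ironic or already have a uuid on disk, and otherwise exactly one
compute node record for the host gets its uuid written locally while zero or
multiple records abort startup. A self-contained approximation of that
control flow, inferred only from these tests (the version constant and
exception class below are assumptions, not nova's real definitions):

    NODE_IDENTITY_VERSION = 61  # assumed threshold, for illustration only

    class InvalidConfiguration(Exception):
        pass

    def ensure_existing_node_identity(service_version, is_ironic,
                                      read_local_node_uuid,
                                      write_local_node_uuid,
                                      get_nodes_for_host):
        if service_version >= NODE_IDENTITY_VERSION:
            # Up-to-date service: the identity file must already exist,
            # except for ironic which never persists one.
            if read_local_node_uuid() is None and not is_ironic:
                raise InvalidConfiguration()
            return
        if is_ironic or read_local_node_uuid():
            # Old service, but nothing to migrate.
            return
        # get_nodes_for_host() returns the ComputeNode records for this host.
        nodes = get_nodes_for_host()
        if len(nodes) != 1:
            # Zero or multiple node records means something is wrong.
            raise InvalidConfiguration()
        write_local_node_uuid(str(nodes[0].uuid))
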
diff --git a/nova/tests/unit/compute/test_pci_placement_translator.py b/nova/tests/unit/compute/test_pci_placement_translator.py
new file mode 100644
index 0000000000..0592186e54
--- /dev/null
+++ b/nova/tests/unit/compute/test_pci_placement_translator.py
@@ -0,0 +1,291 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import ddt
+from oslo_utils.fixture import uuidsentinel as uuids
+from unittest import mock
+
+from nova.compute import pci_placement_translator as ppt
+from nova.compute import provider_tree
+from nova import exception
+from nova.objects import fields
+from nova.objects import pci_device
+from nova.pci import devspec
+from nova import test
+
+
+def dev(v, p):
+ return pci_device.PciDevice(vendor_id=v, product_id=p)
+
+
+# NOTE(gibi): Most of the nova.compute.pci_placement_translator module is
+ # covered by functional tests in
+# nova.tests.functional.libvirt.test_pci_in_placement
+@ddt.ddt
+class TestTranslator(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ patcher = mock.patch(
+ "nova.compute.pci_placement_translator."
+ "_is_placement_tracking_enabled")
+ self.addCleanup(patcher.stop)
+ patcher.start()
+
+ def test_translator_skips_devices_without_matching_spec(self):
+ """As every PCI device in the PciTracker is created by matching a
+ PciDeviceSpec the translator should always be able to look up the spec
+ for a device. But if cannot then the device will be skipped and warning
+ will be emitted.
+ """
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = pci_device.PciDeviceList(
+ objects=[
+ pci_device.PciDevice(
+ address="0000:81:00.0",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ instance_uuid=None,
+ )
+ ]
+ )
+ # So we have a device but there is no spec for it
+ pci_tracker.dev_filter.get_devspec = mock.Mock(return_value=None)
+ pci_tracker.dev_filter.specs = []
+ # We expect that the provider_tree is not touched as the device
+ # without a spec is skipped; we assert that with the NonCallableMock.
+ provider_tree = mock.NonCallableMock()
+
+ ppt.update_provider_tree_for_pci(
+ provider_tree, "fake-node", pci_tracker, {}, [])
+
+ self.assertIn(
+ "Device spec is not found for device 0000:81:00.0 in "
+ "[pci]device_spec. Ignoring device in Placement resource view. "
+ "This should not happen. Please file a bug.",
+ self.stdlog.logger.output
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (None, set()),
+ ("", set()),
+ ("a", {"CUSTOM_A"}),
+ ("a,b", {"CUSTOM_A", "CUSTOM_B"}),
+ ("HW_GPU_API_VULKAN", {"HW_GPU_API_VULKAN"}),
+ ("CUSTOM_FOO", {"CUSTOM_FOO"}),
+ ("custom_bar", {"CUSTOM_BAR"}),
+ ("custom-bar", {"CUSTOM_CUSTOM_BAR"}),
+ ("CUSTOM_a", {"CUSTOM_A"}),
+ ("a@!#$b123X", {"CUSTOM_A_B123X"}),
+ # Note that both trait names are normalized to the same trait
+ ("a!@b,a###b", {"CUSTOM_A_B"}),
+ )
+ def test_trait_normalization(self, trait_names, expected_traits):
+ self.assertEqual(
+ expected_traits,
+ ppt.get_traits(trait_names)
+ )
+
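For illustration, the expected values in the data set above follow one rule: standard os-traits names and well-formed CUSTOM_ names survive upper-casing unchanged, while anything else is squashed into a CUSTOM_ trait with runs of invalid characters collapsed to a single underscore. A minimal stand-alone sketch of that rule (an approximation for illustration only; the real get_traits relies on the os-traits helpers rather than the hard-coded set below):

    import re

    # Stand-in for "is this a standard os-traits name"; an assumption of the
    # sketch only -- nova consults the os-traits library instead.
    STANDARD_TRAITS = {"HW_GPU_API_VULKAN"}

    def normalize_traits(trait_names):
        """Turn a comma separated trait string into a set of placement traits."""
        if not trait_names:
            return set()
        result = set()
        for name in trait_names.split(","):
            upper = name.upper()
            if upper in STANDARD_TRAITS or re.fullmatch(r"CUSTOM_[A-Z0-9_]+", upper):
                # already a valid standard or custom trait after upper-casing
                result.add(upper)
            else:
                # collapse runs of invalid characters and prefix with CUSTOM_
                result.add("CUSTOM_" + re.sub(r"[^A-Z0-9_]+", "_", upper))
        return result

    assert normalize_traits("a!@b,a###b") == {"CUSTOM_A_B"}
    assert normalize_traits("custom-bar") == {"CUSTOM_CUSTOM_BAR"}
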
+ @ddt.unpack
+ @ddt.data(
+ (dev(v='1234', p='5678'), None, "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "", "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "PGPU", "PGPU"),
+ (dev(v='1234', p='5678'), "pgpu", "PGPU"),
+ (dev(v='1234', p='5678'), "foobar", "CUSTOM_FOOBAR"),
+ (dev(v='1234', p='5678'), "custom_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom-foo", "CUSTOM_CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "a###b", "CUSTOM_A_B"),
+ (dev(v='123a', p='567b'), "", "CUSTOM_PCI_123A_567B"),
+ )
+ def test_resource_class_normalization(self, pci_dev, rc_name, expected_rc):
+ self.assertEqual(
+ expected_rc,
+ ppt.get_resource_class(
+ rc_name, pci_dev.vendor_id, pci_dev.product_id
+ ),
+ )
+
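The resource class cases above can be read the same way: an empty or missing name defaults to CUSTOM_PCI_<vendor>_<product>, standard classes such as PGPU pass through upper-cased, and everything else becomes a normalized CUSTOM_ class. Again a simplified illustration under those assumptions, not nova's get_resource_class:

    import re

    # Assumption of the sketch: treat PGPU as the only standard class; the
    # real code consults os-resource-classes.
    STANDARD_RCS = {"PGPU"}

    def resource_class_for(rc_name, vendor_id, product_id):
        if not rc_name:
            return "CUSTOM_PCI_%s_%s" % (vendor_id.upper(), product_id.upper())
        upper = rc_name.upper()
        if upper in STANDARD_RCS or re.fullmatch(r"CUSTOM_[A-Z0-9_]+", upper):
            return upper
        return "CUSTOM_" + re.sub(r"[^A-Z0-9_]+", "_", upper)

    assert resource_class_for(None, "123a", "567b") == "CUSTOM_PCI_123A_567B"
    assert resource_class_for("custom-foo", "1234", "5678") == "CUSTOM_CUSTOM_FOO"
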
+ def test_dependent_device_pf_then_vf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(pf, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ vf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_dependent_device_vf_then_pf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf2 = pci_device.PciDevice(
+ address="0000:81:00.2",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(vf, {"resource_class": "foo"})
+ pv._add_dev(vf2, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ pf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.0 and 0000:81:00.1,0000:81:00.2 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_mixed_rc_for_sibling_vfs(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf1, vf2, vf3, vf4 = [
+ pci_device.PciDevice(
+ address="0000:81:00.%d" % f,
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ for f in range(0, 4)
+ ]
+
+ pv._add_dev(vf1, {"resource_class": "a", "traits": "foo,bar,baz"})
+ # order is irrelevant
+ pv._add_dev(vf2, {"resource_class": "a", "traits": "foo,baz,bar"})
+ # but a missing trait is rejected
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf3,
+ {"resource_class": "a", "traits": "foo,bar"},
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_FOO for "
+ "0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO "
+ "for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+ # as well as an additional trait
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf4,
+ {"resource_class": "a", "traits": "foo,bar,baz,extra"}
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_EXTRA,"
+ "CUSTOM_FOO for 0000:81:00.3 and COMPUTE_MANAGED_PCI_DEVICE,"
+ "CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+
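Taken together, the dependent-device and sibling-VF tests above pin down the configuration contract for [pci]device_spec: a PF and its own VFs must never both be whitelisted, and VFs of the same PF must be configured with an identical set of traits (the test name suggests the same holds for the resource_class). A hypothetical valid configuration under those rules, with made-up addresses, written out as the JSON entries the option takes:

    # Illustrative only: either whitelist the parent PF ...
    pf_only_device_spec = [
        '{"address": "0000:81:00.0", "resource_class": "CUSTOM_A"}',
    ]
    # ... or whitelist its VFs -- never both -- giving every sibling VF the
    # same traits (ordering inside the comma separated list is irrelevant).
    vfs_only_device_spec = [
        '{"address": "0000:81:00.1", "resource_class": "CUSTOM_A", "traits": "foo,bar,baz"}',
        '{"address": "0000:81:00.2", "resource_class": "CUSTOM_A", "traits": "baz,bar,foo"}',
    ]
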
+ def test_translator_maps_pci_device_to_rp(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+
+ pv._add_dev(vf, {})
+ pv._add_dev(pf, {})
+ pv.update_provider_tree(pt)
+
+ self.assertEqual(
+ pt.data("fake-node_0000:71:00.0").uuid, vf.extra_info["rp_uuid"]
+ )
+ self.assertEqual(
+ pt.data("fake-node_0000:72:00.0").uuid, pf.extra_info["rp_uuid"]
+ )
+
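The two assertions above imply the naming scheme for the per-device resource providers: a PF (or any non-VF device) gets a provider named after the compute node and its own address, while VFs are pooled on a provider named after their parent PF's address. A tiny sketch of that assumed scheme:

    def rp_name_for(node_name, address, parent_addr, is_vf):
        # VFs share one RP per parent PF; other devices get their own RP
        return "%s_%s" % (node_name, parent_addr if is_vf else address)

    assert rp_name_for(
        "fake-node", "0000:81:00.1", "0000:71:00.0", True
    ) == "fake-node_0000:71:00.0"
    assert rp_name_for(
        "fake-node", "0000:72:00.0", None, False
    ) == "fake-node_0000:72:00.0"
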
+ def test_update_provider_tree_for_pci_update_pools(self):
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ )
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = [pf]
+ pci_tracker.dev_filter.specs = [devspec.PciDeviceSpec({})]
+
+ ppt.update_provider_tree_for_pci(pt, 'fake-node', pci_tracker, {}, [])
+
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices.\
+ assert_called_once_with()
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index e06279f836..cd36b8987f 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -12,12 +12,14 @@
import copy
import datetime
+import ddt
from unittest import mock
from keystoneauth1 import exceptions as ks_exc
import os_resource_classes as orc
import os_traits
from oslo_config import cfg
+from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import units
@@ -62,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
+ 'uuid': uuids.cn1,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
+ deleted=False,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -584,7 +588,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
@@ -617,7 +621,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
@@ -641,8 +645,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'flavor',
'migration_context',
'resources'])
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
- _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
@@ -669,7 +672,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -728,7 +731,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
@@ -745,7 +748,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
@@ -770,7 +773,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
@@ -795,7 +798,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
@@ -821,7 +824,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -861,7 +864,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
@@ -887,7 +890,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -924,7 +927,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -950,7 +953,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -985,7 +988,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1011,7 +1014,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1054,7 +1057,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
@@ -1082,7 +1085,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1119,7 +1122,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1145,7 +1148,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1197,7 +1200,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1238,7 +1241,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1271,7 +1274,7 @@ class TestInitComputeNode(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
@@ -1294,14 +1297,14 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
- def fake_get_node(_ctx, host, node):
+ def fake_get_node(_ctx, uuid):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
@@ -1311,85 +1314,67 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.cn1)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
- pci_mock, get_by_hypervisor_mock):
+ pci_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
- def fake_get_all(_ctx, nodename):
- return [cn]
+ def fake_get_node(_ctx, uuid):
+ return cn
- get_mock.side_effect = exc.NotFound
- get_by_hypervisor_mock.side_effect = fake_get_all
+ get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
- self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock)
+ create_mock):
+ self._test_compute_node_created(update_mock, get_mock, create_mock)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@@ -1450,13 +1435,9 @@ class TestInitComputeNode(BaseTestCase):
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- if rebalances_nodes:
- get_by_hypervisor_mock.assert_called_once_with(
- mock.sentinel.ctx, _NODENAME)
- else:
- get_by_hypervisor_mock.assert_not_called()
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.compute_node_uuid)
+
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
@@ -1464,7 +1445,7 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
@@ -1487,14 +1468,14 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
- ctxt, _HOSTNAME, _NODENAME)] * 2)
+ ctxt, uuids.cn_uuid)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
@@ -1510,7 +1491,83 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'fake-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Host is the same, no _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_not_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node_move_host(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'old-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Our host changed, so we should have the updated value and have
+ # called _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
+@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@@ -1577,6 +1634,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
+ @mock.patch('nova.objects.ComputeNode.save', new=mock.Mock())
@mock.patch(
'nova.pci.stats.PciDeviceStats.has_remote_managed_device_pools',
return_value=True)
@@ -1707,12 +1765,18 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(exp_inv, ptree.data(new_compute.uuid).inventory)
mock_sync_disabled.assert_called_once()
+ @ddt.data(
+ exc.ResourceProviderUpdateConflict(
+ uuid='uuid', generation=42, error='error'),
+ exc.PlacementReshapeConflict(error='error'),
+ )
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_resource_change', return_value=False)
- def test_update_retry_success(self, mock_resource_change,
- mock_sync_disabled):
+ def test_update_retry_success(
+ self, exc, mock_resource_change, mock_sync_disabled
+ ):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
@@ -1726,9 +1790,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.driver_mock.update_provider_tree.side_effect = lambda *a: None
ufpt_mock = self.rt.reportclient.update_from_provider_tree
- ufpt_mock.side_effect = (
- exc.ResourceProviderUpdateConflict(
- uuid='uuid', generation=42, error='error'), None)
+ ufpt_mock.side_effect = (exc, None)
self.rt._update(mock.sentinel.ctx, new_compute)
@@ -1766,7 +1828,221 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
- self.assertEqual(1, mock_resource_change.call_count)
+ self.assertEqual(0, mock_resource_change.call_count)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting(self, mock_update_provider_tree_for_pci):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call did not change any allocations so
+ update_from_provider_tree called without triggering reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
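The test_update_pci_reporting* cases above fix the control flow in ResourceTracker._update: the allocations for the provider tree are always fetched, handed to update_provider_tree_for_pci together with the instances resizing to the same host, and only passed on to update_from_provider_tree (which is what triggers a reshape) when the driver or the PCI translator reports that allocations changed. A condensed sketch of that flow, with simplified names, injected dependencies and no retry or error handling; it is an approximation, not the actual method:

    class ReshapeNeeded(Exception):
        """Stand-in for nova.exception.ReshapeNeeded."""

    def update_to_placement_sketch(ctxt, reportclient, driver, pci_tracker,
                                   ptree, nodename, same_host_resizes,
                                   update_provider_tree_for_pci):
        # Let the virt driver populate the tree; it signals a needed reshape
        # by raising (in nova: exception.ReshapeNeeded).
        try:
            driver.update_provider_tree(ptree, nodename)
            driver_reshape = False
        except ReshapeNeeded:
            driver_reshape = True

        # The current allocations are always fetched so the PCI translator
        # can place PCI allocations on the device RPs it creates.
        allocs = reportclient.get_allocations_for_provider_tree(ctxt, nodename)
        pci_reshape = update_provider_tree_for_pci(
            ptree, nodename, pci_tracker, allocs, same_host_resizes)

        # Allocations are only handed to update_from_provider_tree -- which
        # is what triggers a reshape in placement -- if the driver or the
        # PCI translator actually moved allocations around.
        reportclient.update_from_provider_tree(
            ctxt, ptree,
            allocations=allocs if (driver_reshape or pci_reshape) else None)
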
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_reshape(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call changed allocations so
+ update_from_provider_tree called with allocations to trigger reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting changed some allocations
+ mock_update_provider_tree_for_pci.return_value = True
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @ddt.data(True, False)
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_driver_reshape(
+ self, pci_reshape, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker first called the
+ driver.update_provider_tree and that needed reshape so the allocations
+ are pulled. Then independently of update_provider_tree_for_pci the
+ update_from_provider_tree is called with the allocations to trigger
+ reshape in placement
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that the driver requests reshape
+ self.driver_mock.update_provider_tree.side_effect = [
+ exc.ReshapeNeeded, None]
+ mock_update_provider_tree_for_pci.return_value = pci_reshape
+
+ self.rt._update(mock.sentinel.ctx, compute_obj, startup=True)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_same_host_resize(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and with the list of instances that are being resized to the same
+ host.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+ self.rt.tracked_migrations = {
+ uuids.inst1: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst1,
+ ),
+ uuids.inst2: objects.Migration(
+ migration_type="evacuation",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst2,
+ ),
+ uuids.inst3: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node1",
+ dest_node="fake-node2",
+ instance_uuid=uuids.inst3,
+ ),
+ }
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [uuids.inst1],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ def test_update_pci_reporting_allocation_in_use_error_propagated(self):
+ """Assert that if the pci placement reporting code tries to remove
+ inventory with allocation from placement due to invalid hypervisor
+ or [pci]device_spec reconfiguration then the InventoryInUse error from
+ placement is propagated and makes the compute startup fail.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ self.rt.reportclient.update_from_provider_tree.side_effect = (
+ exc.InventoryInUse(
+ resource_class="FOO", resource_provider="bar"))
+
+ self.assertRaises(
+ exc.PlacementPciException,
+ self.rt._update,
+ mock.sentinel.ctx,
+ compute_obj,
+ startup=True,
+ )
@mock.patch('nova.objects.Service.get_by_compute_host',
return_value=objects.Service(disabled=True))
@@ -1820,6 +2096,10 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_update_to_placement',
+ new=mock.Mock())
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
@@ -2124,26 +2404,45 @@ class TestInstanceClaim(BaseTestCase):
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
+ self.flags(
+ group="pci",
+ device_spec=[
+ jsonutils.dumps({"vendor_id": "0001", "product_id": "0002"})
+ ],
+ )
+ pci_dev = pci_device.PciDevice.create(
+ None,
+ dev_dict={
+ "compute_node_id": 1,
+ "address": "0000:81:00.0",
+ "product_id": "0002",
+ "vendor_id": "0001",
+ "numa_node": 0,
+ "dev_type": obj_fields.PciDeviceType.STANDARD,
+ "status": obj_fields.PciDeviceStatus.AVAILABLE,
+ "parent_addr": None,
+ },
+ )
+
+ pci_dev.instance_uuid = None
+ pci_devs = [pci_dev]
+
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
with mock.patch.object(
objects.PciDeviceList, 'get_by_compute_node',
- return_value=objects.PciDeviceList()
+ return_value=objects.PciDeviceList(objects=pci_devs)
):
self.rt.pci_tracker = pci_manager.PciDevTracker(
mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
- pci_dev = pci_device.PciDevice.create(
- None, fake_pci_device.dev_dict)
- pci_devs = [pci_dev]
- self.rt.pci_tracker.pci_devs = objects.PciDeviceList(objects=pci_devs)
-
request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ spec=[{'vendor_id': '0001', 'product_id': '0002'}])
pci_requests = objects.InstancePCIRequests(
requests=[request],
instance_uuid=self.instance.uuid)
self.instance.pci_requests = pci_requests
+ self.instance.pci_devices = objects.PciDeviceList()
check_bfv_mock.return_value = False
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@@ -2155,7 +2454,20 @@ class TestInstanceClaim(BaseTestCase):
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
- 'pci_device_pools': objects.PciDevicePoolList(),
+ 'pci_device_pools': objects.PciDevicePoolList(
+ objects=[
+ objects.PciDevicePool(
+ vendor_id='0001',
+ product_id='0002',
+ numa_node=0,
+ tags={
+ 'dev_type': 'type-PCI',
+ 'address': '0000:81:00.0'
+ },
+ count=0
+ )
+ ]
+ ),
'stats': {
'io_workload': 0,
'num_instances': 1,
@@ -2172,7 +2484,8 @@ class TestInstanceClaim(BaseTestCase):
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
- pci_stats_mock.assert_called_once_with([request])
+ pci_stats_mock.assert_called_once_with(
+ [request], provider_mapping=None)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -2376,7 +2689,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2480,7 +2793,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
return_value=_COMPUTE_NODE_FIXTURES[0])
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
return_value=[])
@@ -2652,7 +2965,7 @@ class TestResize(BaseTestCase):
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2822,7 +3135,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2954,7 +3267,7 @@ class TestRebuild(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 541cc1012e..6f78678a92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -835,7 +835,9 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
- limits=None, request_spec=None, accel_uuids=[], version='6.0')
+ limits=None, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None,
+ version='6.2')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -862,20 +864,95 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'migration': None,
'limits': None
}
+ # Pass reimage_boot_volume to the client call...
compute_api.rebuild_instance(
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
- node=None, host=None, **rebuild_args)
+ node=None, host=None, reimage_boot_volume=False,
+ target_state=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2'),
+ mock.call('6.1'),
+ mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
+ # ...and assert that it does not show up on the wire before 6.1
mock_cctx.cast.assert_called_with( # No accel_uuids
ctxt, 'rebuild_instance',
instance=self.fake_instance_obj,
scheduled_node=None, **rebuild_args)
+ def test_rebuild_instance_vol_backed_old_rpcapi(self):
+ # With rpcapi < 6.1, if reimage_boot_volume is True then we
+ # should raise an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to [False, False, True, True], so that the
+ # 6.0 version is used.
+ mock_client.can_send_version.side_effect = [False, False, True, True]
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': None,
+ }
+ self.assertRaises(
+ exception.NovaException, compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2')])
+
+ def test_rebuild_instance_evacuate_old_rpcapi(self):
+ # With rpcapi < 6.2, an evacuate with target_state set should raise
+ # an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to return False.
+ mock_client.can_send_version.return_value = False
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': 'stopped',
+ }
+ self.assertRaises(
+ exception.UnsupportedRPCVersion,
+ compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+
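The three rebuild_instance tests above encode the compatibility rules for the new arguments: reimage_boot_volume rides on RPC 6.1 and target_state on RPC 6.2; against an older compute the new arguments are silently dropped when they carry no semantics (False/None) and the call is refused otherwise. A hedged sketch of that negotiation pattern (simplified names and a single stand-in exception, not the actual ComputeAPI code; the pre-6.0 accel_uuids handling is omitted):

    class TooOldCompute(Exception):
        """Stand-in for the NovaException / UnsupportedRPCVersion the tests
        expect."""

    def cast_rebuild(client, ctxt, instance, reimage_boot_volume=False,
                     target_state=None, **kwargs):
        version = '6.2'
        if not client.can_send_version('6.2'):
            if target_state is not None:
                # the new semantic cannot be honored by an old compute
                raise TooOldCompute('target_state requires RPC 6.2')
            version = '6.1'
        if version == '6.1' and not client.can_send_version('6.1'):
            if reimage_boot_volume:
                raise TooOldCompute('reimage_boot_volume requires RPC 6.1')
            version = '6.0'
        # only put the new arguments on the wire when the server understands
        # them
        if version == '6.2':
            kwargs['target_state'] = target_state
        if version in ('6.1', '6.2'):
            kwargs['reimage_boot_volume'] = reimage_boot_volume
        cctxt = client.prepare(server=instance.host, version=version)
        cctxt.cast(ctxt, 'rebuild_instance', instance=instance, **kwargs)
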
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
self._test_compute_api('reserve_block_device_name', 'call',
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index c939b927f1..f95a722ced 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -281,7 +281,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
return instance
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock())
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@@ -631,7 +631,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request(
self, mock_update_pci, mock_setup_network):
requested_res = [objects.RequestGroup(
@@ -655,7 +655,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host',
new=mock.NonCallableMock())
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request_update_raises(
self, mock_update_pci):
requested_res = [objects.RequestGroup(
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index 848050d769..dd10ecd7df 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -1558,47 +1558,86 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
def test_no_pci_request(self):
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, [], provider_mapping)
- def test_pci_request_from_flavor(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=None)]
+ def test_pci_request_from_flavor_no_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
+ self.context, mock.sentinel.report_client, pci_requests,
+ provider_mapping)
+
+ self.assertNotIn('rp_uuids', req.spec[0])
+
+ def test_pci_request_from_flavor_with_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
+ provider_mapping = {
+ f"{uuids.req1}-0": [uuids.rp1],
+ f"{uuids.req1}-1": [uuids.rp2],
+ }
+
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
+ self.assertEqual(
+ {uuids.rp1, uuids.rp2}, set(req.spec[0]["rp_uuids"].split(','))
+ )
+
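The flavor-based cases above document the shape of the provider mapping consumed by update_pci_request_with_placement_allocations: requests coming from the flavor (requester_id None) are keyed as '<request_id>-<n>' with one entry per requested device, and the matched provider UUIDs end up comma-joined in spec[0]['rp_uuids']. A small illustrative sketch of that lookup, under those assumptions, not nova's implementation:

    def rp_uuids_for_flavor_request(request_id, spec, provider_mapping):
        """Collect the RPs allocated for a flavor based PCI request.

        provider_mapping is assumed to be keyed '<request_id>-<n>', one key
        per device in the request, each mapping to a list of RP UUIDs.
        """
        rp_uuids = [
            rp
            for key, rps in provider_mapping.items()
            if key.startswith(f"{request_id}-")
            for rp in rps
        ]
        if rp_uuids:
            # the scheduler's choice is recorded on the request spec so the
            # PCI claim on the compute can be restricted to those devices
            spec[0]["rp_uuids"] = ",".join(rp_uuids)
        return rp_uuids
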
def test_pci_request_has_no_mapping(self):
pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_pci_request_ambiguous_mapping(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1, uuids.rp2]}
self.assertRaises(
exception.AmbiguousResourceProviderForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_unexpected_provider_name(self):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = 'unexpected'
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}])]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
+
provider_mapping = {uuids.port_1: [uuids.rp1]}
self.assertRaises(
exception.UnexpectedResourceProviderNameForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, report_client, pci_requests,
provider_mapping)
@@ -1610,11 +1649,14 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = (
'host:agent:enp0s31f6')
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}],)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1]}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, report_client, pci_requests, provider_mapping)
report_client.get_resource_provider_name.assert_called_once_with(
diff --git a/nova/tests/unit/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
index 15d004a816..71c9097525 100644
--- a/nova/tests/unit/compute/test_virtapi.py
+++ b/nova/tests/unit/compute/test_virtapi.py
@@ -187,6 +187,9 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
do_test()
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_timeout(self):
instance = mock.Mock()
instance.vm_state = mock.sentinel.vm_state
@@ -212,11 +215,14 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
'vm_state': mock.sentinel.vm_state,
'task_state': mock.sentinel.task_state,
'event_states':
- 'foo-bar: timed out after 0.00 seconds',
+ 'foo-bar: timed out after 1.23 seconds',
},
instance=instance
)
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_one_received_one_timed_out(self):
instance = mock.Mock()
instance.vm_state = mock.sentinel.vm_state
@@ -252,12 +258,15 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
'vm_state': mock.sentinel.vm_state,
'task_state': mock.sentinel.task_state,
'event_states':
- 'foo-bar: received after waiting 0.00 seconds, '
- 'missing-event: timed out after 0.00 seconds',
+ 'foo-bar: received after waiting 1.23 seconds, '
+ 'missing-event: timed out after 1.23 seconds',
},
instance=instance
)
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_multiple_events(self):
instance = mock.Mock()
instance.vm_state = mock.sentinel.vm_state
@@ -282,7 +291,6 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
m.tag = tag
m.event_name = '%s-%s' % (name, tag)
m.wait.side_effect = fake_event_waiter
- print(name, tag)
if name == 'received-but-not-waited':
m.ready.return_value = True
if name == 'missing-but-not-waited':
@@ -323,9 +331,9 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
'vm_state': mock.sentinel.vm_state,
'task_state': mock.sentinel.task_state,
'event_states':
- 'received-event: received after waiting 0.00 seconds, '
+ 'received-event: received after waiting 1.23 seconds, '
'early-event: received early, '
- 'missing-event: timed out after 0.00 seconds, '
+ 'missing-event: timed out after 1.23 seconds, '
'received-but-not-waited-event: received but not '
'processed, '
'missing-but-not-waited-event: expected but not received'
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index de15be28bd..4e888139f6 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -761,7 +761,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_call_livem_checks_on_host')
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 6f5331cc48..971570dfb5 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -16,8 +16,10 @@
"""Tests for the conductor service."""
import copy
+import ddt
from unittest import mock
+from keystoneauth1 import exceptions as ks_exc
from oslo_db import exception as db_exc
from oslo_limit import exception as limit_exceptions
import oslo_messaging as messaging
@@ -52,6 +54,7 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import request_spec
from nova.scheduler.client import query
+from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
@@ -385,7 +388,9 @@ class _BaseTaskTestCase(object):
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host',
- 'request_spec': None}
+ 'request_spec': None,
+ 'reimage_boot_volume': False,
+ 'target_state': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -4747,6 +4752,68 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_evacuate_old_rpc_with_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': 'stopped'})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version', return_value=False):
+ self.assertRaises(exc.UnsupportedRPCVersion,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj, **rebuild_args)
+
+ def test_evacuate_old_rpc_without_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': None})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ can_send_version.assert_has_calls([
+ mock.call('1.25'), mock.call('1.24'),
+ mock.call('1.12')])
+
+ def test_rebuild_instance_volume_backed(self):
+ inst_obj = self._create_fake_instance_obj()
+ version = '1.25'
+ cctxt_mock = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+
+ @mock.patch.object(self.conductor.client, 'prepare',
+ return_value=cctxt_mock)
+ @mock.patch.object(self.conductor.client, 'can_send_version',
+ return_value=True)
+ def _test(mock_can_send_ver, prepare_mock):
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ prepare_mock.assert_called_once_with(version=version)
+ kw = {'instance': inst_obj, **rebuild_args}
+ cctxt_mock.cast.assert_called_once_with(
+ self.context, 'rebuild_instance', **kw)
+ _test()
+
+ def test_rebuild_instance_volume_backed_old_service(self):
+ """Tests rebuild_instance_volume_backed when the service is too old"""
+ inst_obj = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.assertRaises(exc.NovaException,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj,
+ **rebuild_args)
+ can_send_version.assert_has_calls([mock.call('1.25'),
+ mock.call('1.24')])
+
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
@@ -4869,3 +4936,35 @@ class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
logtext)
self.assertIn('host3\' because it is not up', logtext)
self.assertIn('image1 failed 1 times', logtext)
+
+
+@ddt.ddt
+class TestConductorTaskManager(test.NoDBTestCase):
+ def test_placement_client_startup(self):
+ self.assertIsNone(report.PLACEMENTCLIENT)
+ conductor_manager.ComputeTaskManager()
+ self.assertIsNotNone(report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ test.TestingException)
+ def test_placement_client_startup_fatals(self, exc):
+ self.assertRaises(exc,
+ self._test_placement_client_startup_exception, exc)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure)
+ def test_placement_client_startup_non_fatal(self, exc):
+ self._test_placement_client_startup_exception(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_placement_client_startup_exception(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ try:
+ conductor_manager.ComputeTaskManager()
+ finally:
+ mock_log.error.assert_called_once()
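The two ddt-driven tests above encode which keystoneauth failures are tolerated at conductor startup and which are fatal. A hedged sketch of that pattern (illustrative only, not the nova implementation):

from keystoneauth1 import exceptions as ks_exc

# Connectivity-style errors are logged and tolerated so the service can
# retry later; auth/config errors and anything unexpected are logged and
# re-raised.
RETRYABLE = (ks_exc.EndpointNotFound, ks_exc.DiscoveryFailure,
             ks_exc.RequestTimeout, ks_exc.GatewayTimeout,
             ks_exc.ConnectFailure)


def build_placement_client(create_client, log):
    try:
        return create_client()
    except RETRYABLE as exc:
        log.error('Placement is unreachable at startup, will retry: %s', exc)
        return None
    except Exception as exc:
        log.error('Placement client setup failed fatally: %s', exc)
        raise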
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index 30f3502bc8..639623bbb5 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -587,12 +587,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
@@ -617,41 +617,34 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+        # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+        # which causes a 301 Moved Permanently response to be returned
+        # instead, redirecting to a sanitized version of the URL with the
+        # extra leading '/' characters removed.
+        # See https://github.com/python/cpython/issues/87389 for details.
+        # We consider either response valid for this test, which will also
+        # help if and when the above fix gets backported to older versions
+        # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ if location.startswith('Location: '):
+ location = location[len('Location: '):]
+ location = location.rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data by calling the
- # request socket sendall() method.
- self.data = b''
-
- def fake_sendall(data):
- self.data += data
-
- mock_req.sendall.side_effect = fake_sendall
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.data = self.data.decode()
- self.assertIn('Error code: 400', self.data)
- self.assertIn('Message: URI must not start with //', self.data)
+ self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
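As the NOTE earlier in this file's hunk explains, CPython 3.10.6+ collapses the extra leading slashes itself and answers 301 instead of letting the '//host' form through. A rough sketch of the two outcomes the reworked test accepts (illustrative only, not the websocketproxy code):

def acceptable(status_line, location_header=None):
    # Outcome 1: nova's own guard rejects the request outright.
    if '400 URI must not start with //' in status_line:
        return True
    # Outcome 2: a patched CPython sanitizes the path and redirects; the
    # target must be the same URL with the extra leading '/' stripped.
    if '301 Moved Permanently' in status_line:
        return (location_header or '').startswith('/example.com/%2F..')
    return False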
diff --git a/nova/tests/unit/db/main/test_migrations.py b/nova/tests/unit/db/main/test_migrations.py
index 2b3f01b704..e52deb262a 100644
--- a/nova/tests/unit/db/main/test_migrations.py
+++ b/nova/tests/unit/db/main/test_migrations.py
@@ -241,6 +241,12 @@ class NovaMigrationsWalk(
'Index %s on table %s should not exist' % (index, table_name),
)
+ def assertColumnExists(self, connection, table_name, column):
+ self.assertTrue(
+ oslodbutils.column_exists(connection, table_name, column),
+ 'Column %s on table %s should exist' % (column, table_name),
+ )
+
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
@@ -281,6 +287,42 @@ class NovaMigrationsWalk(
# no check for the MySQL-specific change
+ def _check_ccb0fa1a2252(self, connection):
+ for prefix in ('', 'shadow_'):
+ table_name = prefix + 'block_device_mapping'
+ table = oslodbutils.get_table(connection, table_name)
+
+ self.assertColumnExists(connection, table_name, 'encrypted')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_secret_uuid')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_format')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_options')
+
+ # Only check for the expected types if we're using sqlite because
+ # other databases' types may be different. For example, Boolean
+ # may be represented as an integer in MySQL
+ if connection.engine.name != 'sqlite':
+ return
+
+ self.assertIsInstance(table.c.encrypted.type, sa.types.Boolean)
+ self.assertIsInstance(
+ table.c.encryption_secret_uuid.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_format.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_options.type, sa.types.String)
+
+ def _check_960aac0e09ea(self, connection):
+ self.assertIndexNotExists(
+ connection, 'console_auth_tokens',
+ 'console_auth_tokens_token_hash_idx',
+ )
+ self.assertIndexNotExists(
+ connection, 'instances', 'uuid',
+ )
+
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index b82bf5349f..eefa7b974f 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -42,7 +42,6 @@ from nova import objects
from nova.objects import fields as obj_fields
from nova.objects import network_request as net_req_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
@@ -3383,6 +3382,155 @@ class TestAPI(TestAPIBase):
mocked_client.list_ports.assert_called_once_with(
tenant_id=uuids.fake, device_id=uuids.instance)
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_full_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_single_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ fake_nets = [
+ {
+ "id": "net-id",
+ "name": "foo",
+ "tenant_id": uuids.fake,
+ }
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
@mock.patch.object(neutronapi, 'get_client')
def test_get_subnets_from_port(self, mock_get_client):
mocked_client = mock.create_autospec(client.Client)
@@ -7738,11 +7886,11 @@ class TestAPIPortbinding(TestAPIBase):
'vf_num': 1,
}))
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': '1377',
@@ -7783,11 +7931,11 @@ class TestAPIPortbinding(TestAPIBase):
})
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_card_serial(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': 'a2d6',
@@ -7867,11 +8015,11 @@ class TestAPIPortbinding(TestAPIBase):
})
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_with_cap(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {
constants.BINDING_PROFILE: {
'capabilities': ['switchdev']}}}
@@ -7907,12 +8055,12 @@ class TestAPIPortbinding(TestAPIBase):
constants.BINDING_PROFILE])
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_pf(
self, mock_get_instance_pci_devs, mock_get_devspec
):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_dev = objects.PciDevice(
@@ -8041,11 +8189,11 @@ class TestAPIPortbinding(TestAPIBase):
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_fail(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_objs = [objects.PciDevice(vendor_id='1377',
@@ -8062,7 +8210,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value=[])
+ @mock.patch('nova.objects.Instance.get_pci_devices', return_value=[])
def test_populate_neutron_binding_profile_pci_dev_not_found(
self, mock_get_instance_pci_devs):
api = neutronapi.API()
@@ -8073,9 +8221,13 @@ class TestAPIPortbinding(TestAPIBase):
api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
mock_get_instance_pci_devs.assert_called_once_with(
- instance, pci_req_id)
+ request_id=pci_req_id)
@mock.patch.object(
+ pci_utils, 'is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch.object(
pci_utils, 'get_vf_num_by_pci_address',
new=mock.MagicMock(
side_effect=(lambda vf_a: {'0000:0a:00.1': 1}.get(vf_a)))
@@ -8085,18 +8237,26 @@ class TestAPIPortbinding(TestAPIBase):
new=mock.MagicMock(side_effect=(lambda vf_a: {
'0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_pci_parse_whitelist_called_once(
- self, mock_get_instance_pci_devs):
- white_list = [
- '{"address":"0000:0a:00.1","physical_network":"default"}']
- cfg.CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ self, mock_get_instance_pci_devs
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:0a:00.1",
+ "physical_network": "default",
+ }
+ )
+ ]
+ cfg.CONF.set_override(
+ 'device_spec', device_spec, 'pci')
# NOTE(takashin): neutronapi.API must be initialized
- # after the 'passthrough_whitelist' is set in this test case.
+ # after the 'device_spec' is set in this test case.
api = neutronapi.API()
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
pci_req_id = 'my_req_id'
port_req_body = {'port': {}}
pci_dev = {'vendor_id': '1377',
@@ -8106,7 +8266,7 @@ class TestAPIPortbinding(TestAPIBase):
'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
- whitelist = pci_whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ whitelist = pci_whitelist.Whitelist(CONF.pci.device_spec)
with mock.patch.object(pci_whitelist.Whitelist,
'_parse_white_list_from_config',
wraps=whitelist._parse_white_list_from_config
@@ -8132,7 +8292,7 @@ class TestAPIPortbinding(TestAPIBase):
vf.update_device(pci_dev)
return instance, pf, vf
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_pf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -8146,7 +8306,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 0, req)
self.assertEqual(expected_port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -8158,7 +8318,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf_fail(self,
mock_get_mac_by_pci_address,
@@ -8173,7 +8333,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch('nova.network.neutron.LOG.error')
def test_populate_pci_mac_address_no_device(self, mock_log_error,
mock_get_instance_pci_devs):
diff --git a/nova/tests/unit/notifications/objects/test_notification.py b/nova/tests/unit/notifications/objects/test_notification.py
index 4b6869effb..de9e6f2762 100644
--- a/nova/tests/unit/notifications/objects/test_notification.py
+++ b/nova/tests/unit/notifications/objects/test_notification.py
@@ -386,7 +386,7 @@ notification_object_data = {
# ImageMetaProps, so when you see a fail here for that reason, you must
# *also* bump the version of ImageMetaPropsPayload. See its docstring for
# more information.
- 'ImageMetaPropsPayload': '1.9-24a851511d98e652aebd3536e7e08330',
+ 'ImageMetaPropsPayload': '1.12-b9c64832d7772c1973e913bacbe0e8f9',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.8-4fa3da9cbf0761f1f700ae578f36dc2f',
'InstanceActionRebuildNotification':
diff --git a/nova/tests/unit/objects/test_block_device.py b/nova/tests/unit/objects/test_block_device.py
index ad43bed8bf..85959a961a 100644
--- a/nova/tests/unit/objects/test_block_device.py
+++ b/nova/tests/unit/objects/test_block_device.py
@@ -251,6 +251,14 @@ class _TestBlockDeviceMappingObject(object):
destination_type='local')
self.assertFalse(bdm.is_volume)
+ def test_is_local(self):
+ self.assertTrue(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='local').is_local)
+ self.assertFalse(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='volume').is_local)
+
def test_obj_load_attr_not_instance(self):
"""Tests that lazy-loading something other than the instance field
results in an error.
@@ -276,6 +284,11 @@ class _TestBlockDeviceMappingObject(object):
mock_inst_get_by_uuid.assert_called_once_with(
self.context, bdm.instance_uuid)
+ def test_obj_load_attr_encrypted(self):
+ bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
+ del bdm.encrypted
+ self.assertEqual(bdm.fields['encrypted'].default, bdm.encrypted)
+
def test_obj_make_compatible_pre_1_17(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 1964117fd6..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -16,6 +16,7 @@ import copy
from unittest import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
@@ -553,17 +562,15 @@ class _TestComputeNodeObject(object):
def test_update_from_virt_driver_uuid_already_set(self):
"""Tests update_from_virt_driver where the compute node object already
- has a uuid value so the uuid from the virt driver is ignored.
+ has a uuid value so an error is raised.
"""
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
# Emulate the ironic driver which adds a uuid field.
resources['uuid'] = uuidsentinel.node_uuid
compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
- compute.update_from_virt_driver(resources)
- expected = fake_compute_with_resources.obj_clone()
- expected.uuid = uuidsentinel.something_else
- self.assertTrue(base.obj_equal_prims(expected, compute))
+ self.assertRaises(exception.InvalidNodeConfiguration,
+ compute.update_from_virt_driver, resources)
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
@@ -666,8 +673,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -694,8 +701,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -722,8 +729,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
def test_get_all_by_not_mapped(self):
diff --git a/nova/tests/unit/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
index 2574d93c80..461dc0ff6f 100644
--- a/nova/tests/unit/objects/test_fields.py
+++ b/nova/tests/unit/objects/test_fields.py
@@ -551,7 +551,7 @@ class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
- self.field = fields.Field(fields.NetworkModel())
+ self.field = fields.NetworkModelField()
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
@@ -570,7 +570,7 @@ class TestNetworkVIFModel(TestField):
super(TestNetworkVIFModel, self).setUp()
model = network_model.VIF('6c197bc7-820c-40d5-8aff-7116b993e793')
primitive = jsonutils.dumps(model)
- self.field = fields.Field(fields.NetworkVIFModel())
+ self.field = fields.NetworkVIFModelField()
self.coerce_good_values = [(model, model), (primitive, model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, primitive)]
diff --git a/nova/tests/unit/objects/test_image_meta.py b/nova/tests/unit/objects/test_image_meta.py
index 6e3725de84..371f7b101a 100644
--- a/nova/tests/unit/objects/test_image_meta.py
+++ b/nova/tests/unit/objects/test_image_meta.py
@@ -108,6 +108,7 @@ class TestImageMetaProps(test.NoDBTestCase):
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
+ 'hw_locked_memory': 'true',
'trait:CUSTOM_TRUSTED': 'required',
# Fill sane values for the rest here
}
@@ -116,6 +117,7 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual('vga', virtprops.hw_video_model)
self.assertEqual(512, virtprops.hw_video_ram)
self.assertTrue(virtprops.hw_qemu_guest_agent)
+ self.assertTrue(virtprops.hw_locked_memory)
self.assertIsNotNone(virtprops.traits_required)
self.assertIn('CUSTOM_TRUSTED', virtprops.traits_required)
@@ -285,6 +287,28 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual([set([0, 1, 2, 3])],
virtprops.hw_numa_cpus)
+ def test_locked_memory_prop(self):
+ props = {'hw_locked_memory': 'true'}
+ virtprops = objects.ImageMetaProps.from_dict(props)
+ self.assertTrue(virtprops.hw_locked_memory)
+
+ def test_obj_make_compatible_hw_locked_memory(self):
+ """Check 'hw_locked_memory' compatibility."""
+ # assert that 'hw_locked_memory' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_locked_memory='true',
+ )
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertIn('hw_locked_memory',
+ primitive['nova_object.data'])
+ self.assertTrue(primitive['nova_object.data']['hw_locked_memory'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.32')
+ self.assertNotIn('hw_locked_memory',
+ primitive['nova_object.data'])
+
def test_get_unnumbered_trait_fields(self):
"""Tests that only valid un-numbered required traits are parsed from
the properties.
@@ -349,6 +373,34 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.0')
+ def test_obj_make_compatible_hw_ephemeral_encryption(self):
+ """Check 'hw_ephemeral_encryption(_format)' compatibility."""
+ # assert that 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' is supported
+ # on a suitably new version
+ new_fields = (
+ 'hw_ephemeral_encryption',
+ 'hw_ephemeral_encryption_format'
+ )
+ eph_format = objects.fields.BlockDeviceEncryptionFormatType.LUKS
+ obj = objects.ImageMetaProps(
+ hw_ephemeral_encryption='yes',
+ hw_ephemeral_encryption_format=eph_format,
+ )
+ primitive = obj.obj_to_primitive('1.32')
+ for field in new_fields:
+ self.assertIn(field, primitive['nova_object.data'])
+ self.assertTrue(
+ primitive['nova_object.data']['hw_ephemeral_encryption'])
+ self.assertEqual(
+ eph_format,
+ primitive['nova_object.data']['hw_ephemeral_encryption_format'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.31')
+ for field in new_fields:
+ self.assertNotIn(field, primitive['nova_object.data'])
+
def test_obj_make_compatible_hw_emulation(self):
"""Check 'hw_emulation_architecture' compatibility."""
# assert that 'hw_emulation_architecture' is supported
@@ -486,3 +538,19 @@ class TestImageMetaProps(test.NoDBTestCase):
hw_pci_numa_affinity_policy=fields.PCINUMAAffinityPolicy.SOCKET)
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.27')
+
+ def test_obj_make_compatible_viommu_model(self):
+ """Check 'hw_viommu_model' compatibility."""
+ # assert that 'hw_viommu_model' is supported on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_viommu_model=objects.fields.VIOMMUModel.VIRTIO,
+ )
+ primitive = obj.obj_to_primitive('1.34')
+ self.assertIn('hw_viommu_model', primitive['nova_object.data'])
+ self.assertEqual(
+ objects.fields.VIOMMUModel.VIRTIO,
+ primitive['nova_object.data']['hw_viommu_model'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertNotIn('hw_viommu_model', primitive['nova_object.data'])
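Several of the compatibility tests above follow the same pattern: serialize at an older target version and check that newer fields are dropped. A generic sketch of the oslo.versionedobjects mechanism they rely on (illustrative; the Demo class and its fields are made up, not nova objects):

from oslo_utils import versionutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields as ovo_fields


@ovo_base.VersionedObjectRegistry.register
class Demo(ovo_base.VersionedObject):
    # Pretend 'new_field' was added in version 1.1.
    VERSION = '1.1'
    fields = {
        'old_field': ovo_fields.StringField(),
        'new_field': ovo_fields.StringField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(Demo, self).obj_make_compatible(primitive, target_version)
        target = versionutils.convert_version_to_tuple(target_version)
        if target < (1, 1):
            primitive.pop('new_field', None)


obj = Demo(old_field='a', new_field='b')
# Back-levelling to 1.0 drops the newer field from the primitive.
print('new_field' in obj.obj_to_primitive('1.0')['versioned_object.data'])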
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index de8b8d94fa..6215d2be60 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -25,6 +25,7 @@ from oslo_versionedobjects import base as ovo_base
from nova.compute import task_states
from nova.compute import vm_states
+from nova import context
from nova.db.main import api as db
from nova.db.main import models as sql_models
from nova import exception
@@ -2073,3 +2074,164 @@ class TestInstanceObjectMisc(test.NoDBTestCase):
self.assertEqual(['metadata', 'system_metadata', 'info_cache',
'security_groups', 'pci_devices', 'tags', 'extra',
'extra.flavor'], result_list)
+
+
+class TestInstanceObjectGetPciDevices(test.NoDBTestCase):
+ def test_lazy_loading_pci_devices(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ inst = instance.Instance(ctxt, uuid=uuids.instance)
+ with mock.patch(
+ "nova.objects.PciDeviceList.get_by_instance_uuid",
+ return_value=objects.PciDeviceList(),
+ ) as mock_get_pci:
+ self.assertEqual([], inst.get_pci_devices())
+
+ mock_get_pci.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_lazy_loading_pci_requests(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ devs = [objects.PciDevice(request_id=uuids.req1)]
+ inst = instance.Instance(
+ ctxt,
+ uuid=uuids.instance,
+ pci_devices=objects.PciDeviceList(
+ objects=devs
+ ),
+ )
+
+ with mock.patch(
+ "nova.objects.InstancePCIRequests.get_by_instance_uuid",
+ return_value=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ ) as mock_get_pci_req:
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ mock_get_pci_req.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_no_filter(self):
+ devs = [objects.PciDevice()]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs)
+ )
+
+ self.assertEqual(devs, inst.get_pci_devices())
+
+ def test_no_filter_by_request_id(self):
+ expected_devs = [objects.PciDevice(request_id=uuids.req1)]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs)
+ )
+
+ self.assertEqual(
+ expected_devs, inst.get_pci_devices(request_id=uuids.req1)
+ )
+
+ def test_no_filter_by_source(self):
+ expected_devs = [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ def test_no_filter_by_request_id_and_source(self):
+ expected_devs = []
+ all_devs = expected_devs + [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req2),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ request_id=uuids.req1,
+ source=objects.InstancePCIRequest.NEUTRON_PORT,
+ ),
+ )
+
+ def test_old_pci_dev_and_req(self):
+        """This tests the case when the system has old InstancePCIRequest
+        objects without the request_id being filled, and therefore
+        PciDevice objects where the request_id is None too. These requests
+        and devices are always flavor based.
+ """
+ devs = [
+ objects.PciDevice(request_id=None),
+ objects.PciDevice(request_id=None),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=None,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS,
+ ),
+ )
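Taken together, the tests above pin the filtering semantics of Instance.get_pci_devices. A rough plain-Python reimplementation of those semantics (an illustration of the expected behaviour, not nova's actual method; the source values are placeholders):

# devs: objects with .request_id; reqs: PCI requests with .request_id and
# .alias_name. A request carrying an alias_name is flavor-based, otherwise
# it originated from a neutron port.
def filter_pci_devices(devs, reqs, request_id=None, source=None):
    if request_id is not None:
        devs = [d for d in devs if d.request_id == request_id]
    if source is not None:
        flavor_req_ids = {
            r.request_id for r in reqs if getattr(r, 'alias_name', None)}
        if source == 'flavor-alias':  # placeholder for FLAVOR_ALIAS
            devs = [d for d in devs if d.request_id in flavor_req_ids]
        else:  # placeholder for NEUTRON_PORT
            devs = [d for d in devs if d.request_id not in flavor_req_ids]
    return devs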
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index f3fa8aa55b..aab079381c 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1046,7 +1046,7 @@ class TestRegistry(test.NoDBTestCase):
object_data = {
'Aggregate': '1.3-f315cb68906307ca2d1cca84d4753585',
'AggregateList': '1.3-3ea55a050354e72ef3306adefa553957',
- 'BlockDeviceMapping': '1.20-45a6ad666ddf14bbbedece2293af77e2',
+ 'BlockDeviceMapping': '1.21-220abb8aa1450e759b72fce8ec6ff955',
'BlockDeviceMappingList': '1.18-73bcbbae5ef5e8adcedbc821db869306',
'BuildRequest': '1.3-077dee42bed93f8a5b62be77657b7152',
'BuildRequestList': '1.0-cd95608eccb89fbc702c8b52f38ec738',
@@ -1066,20 +1066,20 @@ object_data = {
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'Flavor': '1.2-4ce99b41327bb230262e5a8f45ff0ce3',
'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
- 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HostMappingList': '1.1-18ac2bfb8c1eb5545bed856da58a79bc',
+ 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HyperVLiveMigrateData': '1.4-e265780e6acfa631476c8170e8d6fce0',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.31-27337af769b0c85b4ba4be8aebc1a65d',
+ 'ImageMetaProps': '1.34-29b3a6b7fe703f36bfd240d914f16c21',
'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759',
'InstanceDeviceMetadata': '1.0-74d78dd36aa32d26d2769a1b57caf186',
- 'InstanceExternalEvent': '1.4-06c2dfcf2d2813c24cd37ee728524f1a',
+ 'InstanceExternalEvent': '1.5-1ec57351a9851c1eb43ccd90662d6dd0',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
@@ -1097,23 +1097,24 @@ object_data = {
'LibvirtLiveMigrateBDMInfo': '1.1-5f4a68873560b6f834b74e7861d71aaf',
'LibvirtLiveMigrateData': '1.10-348cf70ea44d3b985f45f64725d6f6a7',
'LibvirtLiveMigrateNUMAInfo': '1.0-0e777677f3459d0ed1634eabbdb6c22f',
+ 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
'MemoryDiagnostics': '1.0-2c995ae0f2223bb0f8e523c5cc0b83da',
'Migration': '1.7-bd45b232fd7c95cd79ae9187e10ef582',
'MigrationContext': '1.2-89f10a83999f852a489962ae37d8a026',
'MigrationList': '1.5-36793f8d65bae421bd5564d09a4de7be',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
- 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
- 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
- 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'NetworkInterfaceMetadata': '1.2-6f3d480b40fe339067b1c0dd4d656716',
'NetworkMetadata': '1.0-2cb8d21b34f87b0261d3e1d1ae5cf218',
'NetworkRequest': '1.3-3a815ea3df7defa61e0b894dee5288ba',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NicDiagnostics': '1.0-895e9ad50e0f56d5258585e3e066aea5',
- 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
+ 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
+ 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
+ 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
+ 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'PciDevice': '1.7-680e4c590aae154958ccf9677774413b',
+ 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
@@ -1126,9 +1127,9 @@ object_data = {
'ResourceList': '1.0-4a53826625cc280e15fae64a575e0879',
'ResourceMetadata': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
- 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
+ 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SecurityGroup': '1.2-86d67d8d3ab0c971e1dc86e02f9524a8',
'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71',
'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264',
@@ -1141,16 +1142,14 @@ object_data = {
'TrustedCerts': '1.0-dcf528851e0f868c77ee47e90563cda7',
'USBDeviceBus': '1.0-e4c7dd6032e46cd74b027df5eb2d4750',
'VIFMigrateData': '1.0-cb15282b25a039ab35046ed705eb931d',
- 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VirtCPUFeature': '1.0-ea2464bdd09084bd388e5f61d5d4fc86',
'VirtCPUModel': '1.0-5e1864af9227f698326203d7249796b5',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.3-efd3ca8ebcc5ce65fff5a25f31754c54',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
+ 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenDeviceBus': '1.0-272a4f899b24e31e42b2b9a7ed7e9194',
- # TODO(efried): re-alphabetize this
- 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
}
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index d91015a699..58b9859234 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -430,6 +430,62 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
+ def test_from_components_flavor_based_pci_requests(self):
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+ ctxt = context.RequestContext(
+ fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
+ )
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {
+ "id": uuids.image_id,
+ "properties": {"mappings": []},
+ "status": "fake-status",
+ "location": "far-away",
+ }
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {"fake": "property"}
+
+ qos_port_rg = request_spec.RequestGroup()
+ req_lvl_params = request_spec.RequestLevelParams()
+
+ pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "1234", "product_id": "fe12"}],
+ )
+ ]
+ )
+ pci_request_group = request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_1234_FE12": 1},
+ same_provider=True,
+ )
+
+ spec = objects.RequestSpec.from_components(
+ ctxt,
+ instance.uuid,
+ image,
+ flavor,
+ instance.numa_topology,
+ pci_requests,
+ filter_properties,
+ None,
+ instance.availability_zone,
+ port_resource_requests=[qos_port_rg],
+ request_level_params=req_lvl_params,
+ )
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(qos_port_rg, spec.requested_resources[0])
+ self.assertEqual(
+ pci_request_group.obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+ self.assertEqual(req_lvl_params, spec.request_level_params)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
@@ -1054,6 +1110,183 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
pass
+class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_pci_reqs_ignored_if_disabled(self):
+ self.flags(group='filter_scheduler', pci_in_placement=False)
+
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_neutron_based_requests_are_ignored(self):
+ pci_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[],
+ )
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(requests=[pci_req]),
+ )
+ self.assertEqual(
+ objects.InstancePCIRequest.NEUTRON_PORT, pci_req.source
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_rc_from_product_and_vendor(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[{"vendor_id": "fff", "product_id": "dead"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_multi_device_split_to_multiple_groups(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=2,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-1",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_with_rc_and_traits_from_the_pci_req_spec(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "de12",
+ "product_id": "1234",
+ "resource_class": "gpu",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[
+ {
+ "vendor_id": "fff",
+ "product_id": "dead",
+ "traits": "foo,bar,CUSTOM_BLUE",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_GPU": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ # Note that sets would be serialized to tuples by obj_to_primitive in
+ # random order, so we need to match this spec field by field
+ expected = request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ required_traits={"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"},
+ use_same_provider=True,
+ )
+ actual = spec.requested_resources[1]
+ for field in request_spec.RequestGroup.fields.keys():
+ self.assertEqual(getattr(expected, field), getattr(actual, field))
+
+
class TestRequestGroupObject(test.NoDBTestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
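The naming expectations in the new PCI-in-placement tests above can be summarized with a short sketch (derived from the assertions in this file, not from the nova implementation):

# Resource class: an explicit 'resource_class' in the spec wins, otherwise
# the class is built from the vendor/product IDs; traits get the CUSTOM_
# prefix unless they already carry one. Each requested device becomes its
# own request group, suffixed -0, -1, ... after the request UUID.
def rc_name(spec):
    rc = spec.get("resource_class")
    if rc:
        return "CUSTOM_" + rc.upper()
    return "CUSTOM_PCI_%s_%s" % (
        spec["vendor_id"].upper(), spec["product_id"].upper())


def trait_names(spec):
    return {
        t if t.startswith("CUSTOM_") else "CUSTOM_" + t.upper()
        for t in spec.get("traits", "").split(",") if t
    }


print(rc_name({"vendor_id": "de12", "product_id": "1234"}))  # CUSTOM_PCI_DE12_1234
print(rc_name({"resource_class": "gpu"}))                    # CUSTOM_GPU
print(trait_names({"traits": "foo,bar,CUSTOM_BLUE"}))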
diff --git a/nova/tests/unit/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
index f51ee54ac1..4f747e7b7d 100644
--- a/nova/tests/unit/pci/test_devspec.py
+++ b/nova/tests/unit/pci/test_devspec.py
@@ -51,7 +51,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
for component in invalid_val_addr:
address = dict(self.pci_addr)
address[component] = str(invalid_val_addr[component])
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_dict_missing_values(self):
@@ -75,7 +75,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_string_missing_values(self):
@@ -121,7 +121,7 @@ class PciAddressGlobSpecTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciAddressGlobSpec, address)
def test_match(self):
@@ -207,18 +207,18 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_address_invalid_character(self):
pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ("Invalid PCI devices Whitelist config: property func ('12:6') "
+ msg = ("Invalid [pci]device_spec config: property func ('12:6') "
"does not parse as a hex number.")
self.assertEqual(msg, str(exc))
def test_max_func(self):
pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property func (%x) is '
+ msg = ('Invalid [pci]device_spec config: property func (%x) is '
'greater than the maximum allowable value (%x).'
% (devspec.MAX_FUNC + 1, devspec.MAX_FUNC))
self.assertEqual(msg, str(exc))
@@ -226,9 +226,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_domain(self):
pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property domain (%X) '
+ msg = ('Invalid [pci]device_spec config: property domain (%X) '
'is greater than the maximum allowable value (%X).'
% (devspec.MAX_DOMAIN + 1, devspec.MAX_DOMAIN))
self.assertEqual(msg, str(exc))
@@ -236,9 +236,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_bus(self):
pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property bus (%X) is '
+ msg = ('Invalid [pci]device_spec config: property bus (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_BUS + 1, devspec.MAX_BUS))
self.assertEqual(msg, str(exc))
@@ -246,9 +246,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_slot(self):
pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property slot (%X) is '
+ msg = ('Invalid [pci]device_spec config: property slot (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_SLOT + 1, devspec.MAX_SLOT))
self.assertEqual(msg, str(exc))
@@ -382,10 +382,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_vendor_id_out_of_range(self):
pci_info = {"vendor_id": "80860", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property vendor_id (80860) "
+ "Invalid [pci]device_spec config: property vendor_id (80860) "
"is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -398,10 +398,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_product_id_out_of_range(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "50570", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property product_id "
+ "Invalid [pci]device_spec config: property product_id "
"(50570) is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -553,21 +553,21 @@ class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
"product_id": "5050", "physical_network": "hr_net",
PCI_REMOTE_MANAGED_TAG: "true"}
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
# VF device ID mismatch.
pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
"product_id": "5050", "physical_network": "hr_net",
PCI_REMOTE_MANAGED_TAG: "true"}
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
# VF vendor ID mismatch.
pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
"product_id": "5058", "physical_network": "hr_net",
PCI_REMOTE_MANAGED_TAG: "true"}
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
@mock.patch('nova.pci.utils.is_physical_function',
@@ -644,6 +644,10 @@ class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(self.test_dev))
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
def test_remote_managed_vf_match_by_pci_obj(self):
pci_info = {"vendor_id": "8086", "address": "0000:0a:00.2",
"product_id": "5057", "physical_network": "hr_net",
@@ -663,6 +667,10 @@ class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
pci_obj = objects.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
def test_remote_managed_vf_no_match_by_pci_obj(self):
pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
"product_id": "5057", "physical_network": "hr_net",
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
index 6ce248beba..bcd4cecb85 100644
--- a/nova/tests/unit/pci/test_manager.py
+++ b/nova/tests/unit/pci/test_manager.py
@@ -235,7 +235,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self, mock_debug):
self.flags(
group='pci',
- passthrough_whitelist=[
+ device_spec=[
'{"product_id":"2032", "vendor_id":"8086"}'])
# There are systems where 32 bit PCI domain is used. See bug 1897528
# for example. While nova (and qemu) does not support assigning such
@@ -651,8 +651,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
pci_requests = copy.deepcopy(fake_pci_requests)
pci_requests[0]['count'] = 4
pci_requests_obj = self._create_pci_requests_object(pci_requests)
- self.tracker.claim_instance(mock.sentinel.context,
- pci_requests_obj, None)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
+ mock.sentinel.context,
+ pci_requests_obj,
+ None
+ )
self.assertEqual(len(self.tracker.claims[self.inst['uuid']]), 0)
devs = self.tracker.update_pci_for_instance(None,
self.inst,
@@ -687,11 +692,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.inst.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
- claims = self.tracker.claim_instance(
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
mock.sentinel.context,
pci_requests_obj,
- self.inst.numa_topology)
- self.assertEqual([], claims)
+ self.inst.numa_topology
+ )
def test_update_pci_for_instance_deleted(self):
pci_requests_obj = self._create_pci_requests_object(fake_pci_requests)
@@ -803,7 +810,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
free_pci_device_ids = (
[dev.id for dev in self.tracker.pci_stats.get_free_devs()])
self.assertEqual(2, len(free_pci_device_ids))
- allocated_devs = manager.get_instance_pci_devs(self.inst)
+ allocated_devs = self.inst.get_pci_devices()
pci_device = allocated_devs[0]
self.assertNotIn(pci_device.id, free_pci_device_ids)
instance_uuid = self.inst['uuid']
@@ -866,24 +873,3 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.assertIsNone(self.tracker.allocations.get(instance_uuid))
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(fake_db_devs), len(free_devs))
-
-
-class PciGetInstanceDevs(test.NoDBTestCase):
-
- def test_get_devs_object(self):
- def _fake_obj_load_attr(foo, attrname):
- if attrname == 'pci_devices':
- self.load_attr_called = True
- foo.pci_devices = objects.PciDeviceList()
-
- self.stub_out(
- 'nova.objects.Instance.obj_load_attr',
- _fake_obj_load_attr)
-
- self.load_attr_called = False
- manager.get_instance_pci_devs(objects.Instance())
- self.assertTrue(self.load_attr_called)
-
- def test_get_devs_no_pci_devices(self):
- inst = objects.Instance(pci_devices=None)
- self.assertEqual([], manager.get_instance_pci_devs(inst))
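
The tracker tests above now assert that an unsatisfiable claim raises PciDeviceRequestFailed rather than returning an empty result, and the removed PciGetInstanceDevs tests reflect dropping the module-level helper in favour of Instance.get_pci_devices(). A minimal standalone sketch of that "fail loudly" contract (plain Python with hypothetical names, not Nova's tracker):

```python
# Minimal model of the contract change the tests exercise: a claim that
# cannot be satisfied raises instead of returning an empty result.
import unittest


class PciDeviceRequestFailed(Exception):
    """Stand-in for nova.exception.PciDeviceRequestFailed."""


class FakeTracker:
    def __init__(self, free_devs):
        self.free_devs = list(free_devs)

    def claim_instance(self, requested_count):
        if requested_count > len(self.free_devs):
            # new behaviour: fail loudly instead of returning []
            raise PciDeviceRequestFailed(
                "wanted %d devices, only %d free"
                % (requested_count, len(self.free_devs)))
        return [self.free_devs.pop() for _ in range(requested_count)]


class FakeTrackerTest(unittest.TestCase):
    def test_claim_too_many_raises(self):
        tracker = FakeTracker(["dev1", "dev2"])
        self.assertRaises(
            PciDeviceRequestFailed, tracker.claim_instance, 4)
        # nothing was consumed by the failed claim
        self.assertEqual(2, len(tracker.free_devs))


if __name__ == "__main__":
    unittest.main()
```
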
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 7aefbd15fd..4a3f17f6cb 100644
--- a/nova/tests/unit/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
@@ -187,6 +187,21 @@ class PciRequestTestCase(test.NoDBTestCase):
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
+ def test_get_alias_from_config_valid_rc_and_traits(self):
+ fake_alias = jsonutils.dumps({
+ "name": "xxx",
+ "resource_class": "foo",
+ "traits": "bar,baz",
+ })
+ self.flags(alias=[fake_alias], group='pci')
+ aliases = request._get_alias_from_config()
+ self.assertIsNotNone(aliases)
+ self.assertIn("xxx", aliases)
+ self.assertEqual(
+ ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]),
+ aliases["xxx"],
+ )
+
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
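
The new alias test above expects a [pci]alias entry carrying resource_class and traits to parse into a ("legacy", [spec]) tuple keyed by the alias name. A rough standalone model of that shape, assuming only the behaviour asserted in the test (the real parser in nova.pci.request does considerably more validation):

```python
# Simplified model of the parsed alias shape asserted in the test above.
import json

fake_alias = json.dumps({
    "name": "xxx",
    "resource_class": "foo",
    "traits": "bar,baz",
})


def parse_aliases(alias_entries, default_policy="legacy"):
    aliases = {}
    for entry in alias_entries:
        spec = json.loads(entry)
        name = spec.pop("name")
        policy = spec.pop("numa_policy", default_policy)
        # repeated entries with the same name append to the spec list
        aliases.setdefault(name, (policy, []))[1].append(spec)
    return aliases


aliases = parse_aliases([fake_alias])
assert aliases["xxx"] == (
    "legacy", [{"resource_class": "foo", "traits": "bar,baz"}])
print(aliases)
```
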
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index b6a6ef22a0..7eb43a05f4 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -12,10 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
from unittest import mock
from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
@@ -106,17 +108,19 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self._create_fake_devs()
def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_remove_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[1]['count'], 1)
+ self.assertEqual(self.pci_stats.pools[1]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
@@ -145,36 +149,36 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
m = self.pci_stats.to_device_pools_obj()
new_stats = stats.PciDeviceStats(objects.NUMATopology(), m)
- self.assertEqual(len(new_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
+ self.assertEqual(len(new_stats.pools), 4)
+ self.assertEqual([d['count'] for d in new_stats], [1, 1, 1, 1])
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_apply_requests(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.pci_stats.apply_requests(pci_requests, {})
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
- pci_requests_multiple)
+ pci_requests_multiple,
+ {},
+ )
def test_support_requests(self):
- self.assertTrue(self.pci_stats.support_requests(pci_requests))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
+ self.assertTrue(self.pci_stats.support_requests(pci_requests, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_failed(self):
self.assertFalse(
- self.pci_stats.support_requests(pci_requests_multiple))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.pci_stats.support_requests(pci_requests_multiple, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_numa(self):
cells = [
@@ -183,14 +187,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_failed(self):
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info(self):
cells = [
@@ -198,12 +206,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'])
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
# 'legacy' is the default numa_policy so the result must be same
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy = fields.PCINUMAAffinityPolicy.LEGACY)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_pci_numa_policy_preferred(self):
# numa node 0 has 2 devices with vendor_id 'v1'
@@ -217,7 +229,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(
numa_policy=fields.PCINUMAAffinityPolicy.PREFERRED)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info_pci_numa_policy_required(self):
# pci device with vendor_id 'v3' has numa_node=None.
@@ -229,7 +243,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy=fields.PCINUMAAffinityPolicy.REQUIRED)
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_filter_pools_for_socket_affinity_no_socket(self):
self.pci_stats.numa_topology = objects.NUMATopology(
@@ -261,8 +277,11 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.assertEqual(0, len(devs))
def test_consume_requests_failed(self):
- self.assertIsNone(self.pci_stats.consume_requests(
- pci_requests_multiple))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple,
+ )
def test_consume_requests_numa(self):
cells = [
@@ -281,7 +300,12 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests, cells))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
def test_consume_requests_no_numa_info(self):
cells = [
@@ -313,11 +337,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=[vendor_id],
numa_policy=policy, count=count)
- devs = self.pci_stats.consume_requests(pci_requests, cells)
if expected is None:
- self.assertIsNone(devs)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
else:
+ devs = self.pci_stats.consume_requests(pci_requests, cells)
self.assertEqual(set(expected),
set([dev.product_id for dev in devs]))
@@ -444,9 +473,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
@mock.patch(
'nova.pci.whitelist.Whitelist._parse_white_list_from_config')
- def test_white_list_parsing(self, mock_whitelist_parse):
- white_list = '{"product_id":"0001", "vendor_id":"8086"}'
- CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ def test_device_spec_parsing(self, mock_whitelist_parse):
+ device_spec = {"product_id": "0001", "vendor_id": "8086"}
+ CONF.set_override('device_spec', jsonutils.dumps(device_spec), 'pci')
pci_stats = stats.PciDeviceStats(objects.NUMATopology())
pci_stats.add_device(self.fake_dev_2)
pci_stats.remove_device(self.fake_dev_2)
@@ -457,16 +486,34 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
- white_list = ['{"vendor_id":"1137","product_id":"0071",'
- '"address":"*:0a:00.*","physical_network":"physnet1"}',
- '{"vendor_id":"1137","product_id":"0072"}',
- '{"vendor_id":"15b3","product_id":"101e", '
- '"remote_managed": "true"}',
- '{"vendor_id":"15b3","product_id":"101c"}',
- '{"vendor_id":"15b3","product_id":"1018", '
- '"remote_managed": "false"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
- dev_filter = whitelist.Whitelist(white_list)
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "vendor_id": "1137",
+ "product_id": "0071",
+ "address": "*:0a:00.*",
+ "physical_network": "physnet1",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "1137", "product_id": "0072"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "15b3", "product_id": "101c"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "1018",
+ "remote_managed": "false",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
self.pci_stats = stats.PciDeviceStats(
objects.NUMATopology(),
dev_filter=dev_filter)
@@ -539,7 +586,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'compute_node_id': 1,
'address': '0000:0e:00.1',
'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
'status': 'available',
'request_id': None,
'dev_type': fields.PciDeviceType.SRIOV_VF,
@@ -567,35 +614,68 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.assertEqual(v, pool[k])
def _assertPools(self):
+ nr_tagged = len(self.pci_tagged_devices)
+ nr_untagged = len(self.pci_untagged_devices)
+ nr_remote = len(self.remote_managed_netdevs)
+ nr_local = len(self.locally_managed_netdevs)
+ self.assertEqual(
+ nr_tagged + nr_untagged + nr_remote + nr_local,
+ len(self.pci_stats.pools),
+ )
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # 5 pools with the second one having the tag 'physical_network'
- # and the value 'physnet1' and multiple pools for testing
- # variations of explicit/implicit remote_managed tagging.
- self.assertEqual(5, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e',
- len(self.remote_managed_netdevs),
- remote_managed='true')
- self.assertEqual(self.remote_managed_netdevs,
- self.pci_stats.pools[2]['devices'])
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[0]],
- self.pci_stats.pools[3]['devices'])
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[1]],
- self.pci_stats.pools[4]['devices'])
+ # they are also part of the keys.
+
+ # 3 pools for the pci_untagged_devices
+ devs = []
+ j = 0
+ for i in range(j, j + nr_untagged):
+ self._assertPoolContent(self.pci_stats.pools[i], '1137', '0072', 1)
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_untagged_devices, devs)
+ j += nr_untagged
+
+ # 4 pools for the pci_tagged_devices
+ devs = []
+ for i in range(j, j + nr_tagged):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "1137",
+ "0071",
+ 1,
+ physical_network="physnet1",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_tagged_devices, devs)
+ j += nr_tagged
+
+ # 1 pool for the remote_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_remote):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101e",
+ 1,
+ remote_managed="true",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.remote_managed_netdevs, devs)
+ j += nr_remote
+
+ # 2 pools for the locally_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_local):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101c",
+ 1,
+ remote_managed="false",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.locally_managed_netdevs, devs)
+ j += nr_local
def test_add_devices(self):
self._create_pci_devices()
@@ -618,20 +698,30 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
PCI_REMOTE_MANAGED_TAG: 'False'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
PCI_REMOTE_MANAGED_TAG: 'False'}])]
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(5, len(devs))
- self.assertEqual(set(['0071', '0072', '1018', '101e', '101c']),
+ self.assertEqual(set(['0071', '0072', '101e', '101c']),
set([dev.product_id for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 0)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0072', 1)
+ self._assertPoolContent(self.pci_stats.pools[2], '1137', '0072', 1)
+
+ self._assertPoolContent(self.pci_stats.pools[3], '1137', '0071', 0,
physical_network='physnet1')
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e', 0,
+ self._assertPoolContent(self.pci_stats.pools[4], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[6], '1137', '0071', 1,
+ physical_network='physnet1')
+
+ self._assertPoolContent(self.pci_stats.pools[7], '15b3', '101e', 0,
remote_managed='true')
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 0,
+ self._assertPoolContent(self.pci_stats.pools[8], '15b3', '101c', 0,
remote_managed='false')
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 0,
+ self._assertPoolContent(self.pci_stats.pools[9], '15b3', '101c', 0,
remote_managed='false')
def test_add_device_no_devspec(self):
@@ -674,43 +764,779 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device(dev2)
self._assertPools()
- def test_update_device(self):
- # Update device type of one of the device from type-PCI to
+ def test_update_device_splits_the_pool(self):
+ # Update the device type of one of the devices from type-VF to
# type-PF. Verify if the existing pool is updated and a new
# pool is created with dev_type type-PF.
- self._create_pci_devices()
- dev1 = self.pci_tagged_devices.pop()
- dev1.dev_type = 'type-PF'
- self.pci_stats.update_device(dev1)
- self.assertEqual(6, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071',
- 1,
- physical_network='physnet1',
- remote_managed='false')
- self.assertEqual(dev1,
- self.pci_stats.pools[5]['devices'][0])
+ vfs = []
+ for i in range(3):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="1137",
+ product_id="0071",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(3, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+
+ dev = vfs.pop()
+ dev.dev_type = 'type-PF'
+ dev.parent_addr = None
+ self.pci_stats.update_device(dev)
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(1, self.pci_stats.pools[1]["count"])
+ self.assertEqual([dev], self.pci_stats.pools[1]["devices"])
+
+ def test_only_vfs_from_the_same_parent_are_pooled(self):
+ pf1_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ pf1_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ pf2_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0b:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0b:01.0",
+ numa_node=0
+ )
+ pf2_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(pf1_vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[1]["count"])
+ self.assertEqual(pf2_vfs, self.pci_stats.pools[1]["devices"])
+
+
+class PciDeviceStatsPlacementSupportTestCase(test.NoDBTestCase):
+
+ def test_device_spec_rc_and_traits_ignored_during_pooling(self):
+ """Assert that resource_class and traits from the device spec are not
+ used as a discriminator for pool creation.
+ """
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "resource_class": "foo",
+ "address": "*:81:00.1",
+ "traits": "gold",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "resource_class": "baar",
+ "address": "*:81:00.2",
+ "traits": "silver",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ # the two devices are matched by different device_specs with different
+ # resource_class and traits fields
+ pci_stats.add_device(pci_dev1)
+ pci_stats.add_device(pci_dev2)
+
+ # but they are put in the same pool as all the other fields are
+ # matching
+ self.assertEqual(1, len(pci_stats.pools))
+ self.assertEqual(2, pci_stats.pools[0]["count"])
+
+ def test_filter_pools_for_spec_ignores_rc_and_traits_in_spec(self):
+ """Assert that resource_class and traits are ignored in the pci
+ request spec when matching the request to pools.
+ """
+ pci_stats = stats.PciDeviceStats(objects.NUMATopology())
+ pools = [{"vendor_id": "dead", "product_id": "beef"}]
+
+ matching_pools = pci_stats._filter_pools_for_spec(
+ pools=pools,
+ request=objects.InstancePCIRequest(
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "resource_class": "foo",
+ "traits": "blue",
+ }
+ ]
+ ),
+ )
+
+ self.assertEqual(pools, matching_pools)
+
+ def test_populate_pools_metadata_from_assigned_devices(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp1}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertEqual(uuids.rp1, pci_stats.pools[0]['rp_uuid'])
+
+ def test_populate_pools_metadata_from_assigned_devices_device_without_rp(
+ self
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertNotIn('rp_uuid', pci_stats.pools[0])
+
+ def test_populate_pools_metadata_from_assigned_devices_multiple_rp(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp2}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ self.assertRaises(
+ ValueError,
+ pci_stats.populate_pools_metadata_from_assigned_devices,
+ )
+
+
+class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ # for simplicity accept any devices
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "*:*:*.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ self.dev_filter = whitelist.Whitelist(device_spec)
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ # add devices represented by different RPs in placement
+ # two VFs on the same PF
+ self.vf1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.vf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(self.vf1)
+ self.vf1.extra_info = {'rp_uuid': uuids.pf1}
+ self.pci_stats.add_device(self.vf2)
+ self.vf2.extra_info = {'rp_uuid': uuids.pf1}
+ # two PFs, pf2 and pf3 (pf1 is used as the parent of the above VFs)
+ self.pf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:82:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf2)
+ self.pf2.extra_info = {'rp_uuid': uuids.pf2}
+
+ self.pf3 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:83:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf3)
+ self.pf3.extra_info = {'rp_uuid': uuids.pf3}
+ # a PCI
+ self.pci1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:84:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PCI",
+ )
+ self.pci_stats.add_device(self.pci1)
+ self.pci1.extra_info = {'rp_uuid': uuids.pci1}
+
+ # populate the RP -> pool mapping on the pools from the assigned devices
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 1 pool for the two VFs, then each of the remaining devices
+ # has its own pool
+ self.num_pools = 4
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 5
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_unrestricted(self):
+ reqs = []
+ for dev_type in ["type-VF", "type-PF", "type-PCI"]:
+ req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": dev_type,
+ }
+ ],
+ )
+ reqs.append(req)
+
+ # an empty mapping means unrestricted by any provider
+ # we have devs for all types, so each request should fit
+ self.assertTrue(self.pci_stats.support_requests(reqs, {}))
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the same request to consume the pools
+ self.pci_stats.apply_requests(reqs, {})
+ # we have consumed 3 devs (a VF, a PF, and a PCI)
+ self.assertEqual(
+ self.num_devs - 3,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # the empty pools are purged. We have one pool for the remaining VF
+ # and the remaining PF
+ self.assertEqual(2, len(self.pci_stats.pools))
+
+ def test_support_request_restricted_by_provider_mapping(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # simulate that placement restricted the possible RPs to pf3
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+ )
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the request and see if the right device is consumed
+ self.pci_stats.apply_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools any more
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_request_restricted_by_provider_mapping_does_not_fit(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect the request to fail
+ self.assertFalse(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and the pools are not changed
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_neutron_port_based_request_ignore_mapping(self):
+ # by not having the alias_name set this becomes a neutron port based
+ # PCI request
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect that the placement selection is ignored for neutron port
+ # based requests so this request should fit as we have PFs in the pools
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.pci_stats.apply_requests(
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and a PF is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+
+ def test_support_request_req_with_count_2(self):
+ # now ask for two PFs in a single request
+ pf_req = objects.InstancePCIRequest(
+ count=2,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both PF reqs
+ mapping = {
+ f"{uuids.req1}-0": [uuids.pf2],
+ f"{uuids.req1}-1": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(self.pci_stats.support_requests([pf_req], mapping))
+ self.pci_stats.apply_requests([pf_req], mapping)
+ # and both PFs are consumed
+ self.assertEqual(self.num_pools - 2, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_requests_multiple_reqs(self):
+ # request both a VF and a PF
+ vf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.pf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both reqs
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.pf_req}-0": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req, pf_req], mapping)
+ )
+ self.pci_stats.apply_requests([vf_req, pf_req], mapping)
+ # and the proper devices are consumed
+ # Note that the VF pool still has a device so it remains
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_apply_gets_requested_uuids_from_pci_req(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts us to consuming only from
+ # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # call apply with None mapping signalling that the allocation is
+ # already done and the resulting mapping is stored in the request
+ self.pci_stats.apply_requests([pf_req], provider_mapping=None)
+
+ # assert that the right device is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def _create_two_pools_with_two_vfs(self):
+ # create two pools (PFs) with two VFs each
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ for pf_index in [1, 2]:
+ for vf_index in [1, 2]:
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address=f"0000:81:0{pf_index}.{vf_index}",
+ parent_addr=f"0000:81:0{pf_index}.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(dev)
+ dev.extra_info = {'rp_uuid': getattr(uuids, f"pf{pf_index}")}
+
+ # populate the RP -> pool mapping on the pools from the assigned devices
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 2 pools and 4 devs in total
+ self.num_pools = 2
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 4
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_apply_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate where 1 VF
+ # is consumed from PF1 and two from PF2
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.vf_req}-1": [uuids.pf2],
+ f"{uuids.vf_req}-2": [uuids.pf2],
+ }
+ # This should fit
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req], mapping)
+ )
+ # and once applied, the consumption from the pools should be in sync
+ # with the placement allocation. So the PF2 pool is expected to
+ # disappear as it is fully consumed and the PF1 pool should have
+ # one free device.
+ self.pci_stats.apply_requests([vf_req], mapping)
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(uuids.pf1, self.pci_stats.pools[0]['rp_uuid'])
+ self.assertEqual(1, self.pci_stats.pools[0]['count'])
+
+ def test_consume_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # In placement 1 VF is allocated from PF1 and two from PF2
+ "rp_uuids": ",".join([uuids.pf1, uuids.pf2, uuids.pf2])
+ }
+ ],
+ )
+
+ # So when the PCI claim consumes devices based on this request, we
+ # expect that nova follows what is allocated in placement.
+ devs = self.pci_stats.consume_requests([vf_req])
+ self.assertEqual(
+ {"0000:81:01.0": 1, "0000:81:02.0": 2},
+ collections.Counter(dev.parent_addr for dev in devs),
+ )
+
+ def test_consume_restricted_by_allocation(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts us to consuming only from
+ # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # Call consume. It always expects the allocated mapping to be stored
+ # in the PCI request as it is always called from the compute side.
+ consumed_devs = self.pci_stats.consume_requests([pf_req])
+ # assert that the right device is consumed
+ self.assertEqual([self.pf3], consumed_devs)
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {
+ pool["rp_uuid"]
+ for pool in self.pci_stats.pools
+ if pool["count"] > 0
+ },
+ )
class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceVFPFStatsTestCase, self).setUp()
- white_list = ['{"vendor_id":"8086","product_id":"1528"}',
- '{"vendor_id":"8086","product_id":"1515"}',
- '{"vendor_id":"15b3","product_id":"a2d6", '
- '"remote_managed": "false"}',
- '{"vendor_id":"15b3","product_id":"101e", '
- '"remote_managed": "true"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
+ device_spec = [
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1528"}),
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1515"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "a2d6",
+ "remote_managed": "false",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group='pci')
self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528):
@@ -876,13 +1702,21 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
objects.InstancePCIRequest(count=1,
spec=[{'product_id': '1528',
'dev_type': 'type-PF'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
def test_consume_VF_and_PF_same_product_id_failed(self):
self._create_pci_devices(pf_product_id=1515)
pci_requests = [objects.InstancePCIRequest(count=9,
spec=[{'product_id': '1515'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
def test_consume_PF_not_remote_managed(self):
self._create_pci_devices()
@@ -924,8 +1758,11 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
objects.InstancePCIRequest(count=1,
spec=[{'product_id': '101e'}])]
free_devs_before = self.pci_stats.get_free_devs()
- devs = self.pci_stats.consume_requests(pci_requests)
- self.assertIsNone(devs)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
free_devs_after = self.pci_stats.get_free_devs()
self.assertEqual(free_devs_before, free_devs_after)
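
Two behaviours dominate the stats changes above: VFs are pooled per parent PF, and consumption can be restricted by the placement-provided rp_uuid mapping, including asymmetric allocations. A simplified, self-contained model of both ideas using plain dicts (illustrative only; the pool keys and error type are stand-ins for Nova's real PciDeviceStats):

```python
# Standalone model of per-parent VF pooling and rp_uuid-restricted consume.
import collections

# (address, parent_addr, rp_uuid) triples for four VFs on two PFs
devices = [
    ("0000:81:01.1", "0000:81:01.0", "rp-pf1"),
    ("0000:81:01.2", "0000:81:01.0", "rp-pf1"),
    ("0000:81:02.1", "0000:81:02.0", "rp-pf2"),
    ("0000:81:02.2", "0000:81:02.0", "rp-pf2"),
]

# the pool key includes the parent address, so VFs of different PFs
# never end up in the same pool
pools = collections.defaultdict(list)
for addr, parent, rp_uuid in devices:
    pools[(parent, rp_uuid)].append(addr)
assert len(pools) == 2


def consume(pools, rp_uuids):
    """Take one device per requested rp_uuid, mirroring the allocation."""
    taken = []
    for rp in rp_uuids:
        candidates = [k for k in pools if k[1] == rp and pools[k]]
        if not candidates:
            # stand-in for raising PciDeviceRequestFailed
            raise RuntimeError("no free device on provider %s" % rp)
        taken.append(pools[candidates[0]].pop())
    return taken


# asymmetric allocation: one VF from pf1, two from pf2
devs = consume(pools, ["rp-pf1", "rp-pf2", "rp-pf2"])
per_parent = collections.Counter(addr.rsplit(".", 1)[0] for addr in devs)
print(per_parent)  # Counter({'0000:81:02': 2, '0000:81:01': 1})
```
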
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index 5ebccd9121..7490441d92 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -58,6 +58,16 @@ class BasePolicyTest(test.TestCase):
def setUp(self):
super(BasePolicyTest, self).setUp()
+ # TODO(gmann): enforce_scope and enforce_new_defaults are enabled
+ # by default in the code, so disable them in the base test class
+ # while we still have deprecated rules and their tests. We have
+ # enforce_scope and no-legacy tests which explicitly enable scope
+ # and the new defaults in order to test them. In the future, once
+ # we remove the deprecated rules and refactor the unit tests, we
+ # can stop overriding the oslo policy flags.
+ self.flags(enforce_scope=False, group="oslo_policy")
+ if not self.without_deprecated_rules:
+ self.flags(enforce_new_defaults=False, group="oslo_policy")
self.useFixture(fixtures.NeutronFixture(self))
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -134,6 +144,44 @@ class BasePolicyTest(test.TestCase):
self.system_admin_context, self.system_foo_context,
self.system_member_context, self.system_reader_context,
])
+ # A few common sets of contexts to be used in tests
+ #
+ # With scope disabled and no legacy rules, any admin and
+ # project member has access. No other role in that project
+ # will have access.
+ self.project_member_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ])
+ # With scope enabled and legacy rules, only a project scoped admin
+ # and any role in that project will have access.
+ self.project_m_r_or_admin_with_scope_and_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context
+ ])
+ # With scope enabled and no legacy rules, only project scoped admins
+ # and project members have access. No other role in that project
+ # or system scoped token will have access.
+ self.project_member_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context
+ ])
+ # With scope disabled and no legacy rules, any admin,
+ # project member, and project reader has access. No other
+ # role in that project will have access.
+ self.project_reader_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context
+ ])
+ # With scope enabled and no legacy rules, only project scoped admins,
+ # project members, and project readers have access. No other role
+ # in that project or system scoped token will have access.
+ self.project_reader_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context
+ ])
if self.without_deprecated_rules:
# To simulate the new world, remove deprecations by overriding
@@ -149,6 +197,10 @@ class BasePolicyTest(test.TestCase):
"role:member and project_id:%(project_id)s",
"project_reader_api":
"role:reader and project_id:%(project_id)s",
+ "project_member_or_admin":
+ "rule:project_member_api or rule:context_is_admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
})
self.policy.set_rules(self.rules_without_deprecation,
overwrite=False)
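
The new common context sets above encode which callers pass the policy checks under each scope/legacy combination. A small plain-Python illustration of those set relationships (labels only, not oslo.policy objects):

```python
# Illustration of how the common "member or admin" sets shrink as scope
# checks and the new defaults are turned on. Names mirror the contexts
# used in the tests above.
LEGACY_ADMIN = "legacy_admin"
SYSTEM_ADMIN = "system_admin"
PROJECT_ADMIN = "project_admin"
PROJECT_MEMBER = "project_member"
PROJECT_READER = "project_reader"
PROJECT_FOO = "project_foo"

member_or_admin_no_scope_no_legacy = {
    LEGACY_ADMIN, SYSTEM_ADMIN, PROJECT_ADMIN, PROJECT_MEMBER}
m_r_or_admin_scope_and_legacy = {
    LEGACY_ADMIN, PROJECT_ADMIN, PROJECT_MEMBER, PROJECT_READER, PROJECT_FOO}
member_or_admin_scope_no_legacy = {
    LEGACY_ADMIN, PROJECT_ADMIN, PROJECT_MEMBER}

# enabling scope removes the system-scoped admin ...
assert SYSTEM_ADMIN not in member_or_admin_scope_no_legacy
# ... and dropping the legacy rules removes reader/foo from "member" checks
assert PROJECT_READER not in member_or_admin_scope_no_legacy
assert PROJECT_FOO not in member_or_admin_scope_no_legacy
print(sorted(member_or_admin_scope_no_legacy))
```
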
diff --git a/nova/tests/unit/policies/test_admin_actions.py b/nova/tests/unit/policies/test_admin_actions.py
index 60458a1a80..21157fd832 100644
--- a/nova/tests/unit/policies/test_admin_actions.py
+++ b/nova/tests/unit/policies/test_admin_actions.py
@@ -78,12 +78,6 @@ class AdminActionsNoLegacyNoScopePolicyTest(AdminActionsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(AdminActionsNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule and scope disable, only project admin
- # is able to perform server admin actions.
- self.project_action_authorized_contexts = [self.project_admin_context]
-
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
"""Test Admin Actions APIs policies with system scope enabled.
@@ -111,10 +105,3 @@ class AdminActionsScopeTypeNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
only project admin is able to perform admin action on their server.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(AdminActionsScopeTypeNoLegacyPolicyTest, self).setUp()
- # This is how our RBAC will looks like. With no legacy rule
- # and scope enable, only project admin is able to perform
- # server admin actions.
- self.project_action_authorized_contexts = [self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_admin_password.py b/nova/tests/unit/policies/test_admin_password.py
index e5975086f4..01cce2950e 100644
--- a/nova/tests/unit/policies/test_admin_password.py
+++ b/nova/tests/unit/policies/test_admin_password.py
@@ -101,8 +101,8 @@ class AdminPasswordNoLegacyNoScopePolicyTest(AdminPasswordPolicyTest):
super(AdminPasswordNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to change the server password.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
@@ -119,10 +119,8 @@ class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
super(AdminPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to change password.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
@@ -139,5 +137,5 @@ class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
# With scope enable and no legacy rule only project admin/member
# will be able to change password for the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_aggregates.py b/nova/tests/unit/policies/test_aggregates.py
index 8aaf0a6101..6ac7b6e010 100644
--- a/nova/tests/unit/policies/test_aggregates.py
+++ b/nova/tests/unit/policies/test_aggregates.py
@@ -35,14 +35,14 @@ class AggregatesPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform Aggregate
# Operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate_list')
def test_list_aggregate_policy(self, mock_list):
rule_name = "os_compute_api:os-aggregates:index"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -55,7 +55,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
"hosts": ["host1", "host2"]})
body = {"aggregate": {"name": "test",
"availability_zone": "nova1"}}
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.create,
self.req, body=body)
@@ -63,7 +63,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate')
def test_update_aggregate_policy(self, mock_update):
rule_name = "os_compute_api:os-aggregates:update"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 1,
body={"aggregate": {"name": "new_name"}})
@@ -71,7 +71,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.delete_aggregate')
def test_delete_aggregate_policy(self, mock_delete):
rule_name = "os_compute_api:os-aggregates:delete"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.delete,
self.req, 1)
@@ -79,7 +79,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_show_aggregate_policy(self, mock_show):
rule_name = "os_compute_api:os-aggregates:show"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 1)
@@ -87,7 +87,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
def test_set_metadata_aggregate_policy(self, mock_metadata):
rule_name = "os_compute_api:os-aggregates:set_metadata"
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller._set_metadata,
self.req, 1, body=body)
@@ -95,7 +95,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.add_host_to_aggregate')
def test_add_host_aggregate_policy(self, mock_add):
rule_name = "os_compute_api:os-aggregates:add_host"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller._add_host,
self.req, 1,
body={"add_host": {"host": "host1"}})
@@ -103,7 +103,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.remove_host_from_aggregate')
def test_remove_host_aggregate_policy(self, mock_remove):
rule_name = "os_compute_api:os-aggregates:remove_host"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller._remove_host,
self.req, 1,
@@ -118,7 +118,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
body = {'cache': [{'id': uuids.fake_id}]}
req = fakes.HTTPRequest.blank('', version='2.81')
with mock.patch('nova.conductor.api.ComputeTaskAPI.cache_images'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.images,
req, 1, body=body)
@@ -149,9 +149,10 @@ class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
super(AggregatesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to perform
- # Aggregate Operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enabled, only project-scoped admins are
+ # able to perform Aggregate Operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class AggregatesScopeTypeNoLegacyPolicyTest(AggregatesScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_attach_interfaces.py b/nova/tests/unit/policies/test_attach_interfaces.py
index 781ce29e8a..33c531c9c7 100644
--- a/nova/tests/unit/policies/test_attach_interfaces.py
+++ b/nova/tests/unit/policies/test_attach_interfaces.py
@@ -117,22 +117,21 @@ class AttachInterfacesNoLegacyNoScopePolicyTest(AttachInterfacesPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(AttachInterfacesNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loose power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
@@ -149,12 +148,10 @@ class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
super(AttachInterfacesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With Scope enable, system users no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@@ -217,20 +214,19 @@ class AttachInterfacesScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(AttachInterfacesScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
# and reader will be able to allowed operation on server interface.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_availability_zone.py b/nova/tests/unit/policies/test_availability_zone.py
index d021cc14d8..1852f8444c 100644
--- a/nova/tests/unit/policies/test_availability_zone.py
+++ b/nova/tests/unit/policies/test_availability_zone.py
@@ -34,20 +34,21 @@ class AvailabilityZonePolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to get AZ with host
# information.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
+ self.project_authorized_contexts = self.all_contexts
@mock.patch('nova.objects.Instance.save')
def test_availability_zone_list_policy(self, mock_save):
rule_name = "os_compute_api:os-availability-zone:list"
- self.common_policy_auth(self.all_contexts,
+ self.common_policy_auth(self.project_authorized_contexts,
rule_name, self.controller.index,
self.req)
def test_availability_zone_detail_policy(self):
rule_name = "os_compute_api:os-availability-zone:detail"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.detail,
self.req)
@@ -79,9 +80,11 @@ class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest):
super(AvailabilityZoneScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to get
- # AZ with host information.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enabled, only project-scoped admins are
+ # able to get AZ with host information.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+ self.project_authorized_contexts = self.all_project_contexts
class AZScopeTypeNoLegacyPolicyTest(AvailabilityZoneScopeTypePolicyTest):
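Every test above follows the same shape: common_policy_auth() receives the set of contexts expected to pass a rule and implicitly treats every other context as an expected failure. Below is a deliberately simplified, self-contained sketch of that pattern; it is not Nova's actual helper, and the check callable and context names are hypothetical, but it makes the authorized/unauthorized split concrete.

def assert_policy_split(check, rule, authorized_contexts, all_contexts):
    """check(rule, context) -> bool; authorized contexts must pass, others must not."""
    for ctxt in all_contexts:
        allowed = check(rule, ctxt)
        expected = ctxt in authorized_contexts
        assert allowed == expected, (rule, ctxt, allowed, expected)


# A trivial role-based check stands in for the real policy enforcer here.
contexts = {
    'legacy_admin': ['admin'],
    'project_admin': ['admin'],
    'project_member': ['member'],
    'system_reader': ['reader'],
}


def check(rule, name):
    # Pretend only the admin role satisfies the "detail" rule.
    return 'admin' in contexts[name]


assert_policy_split(check, 'os_compute_api:os-availability-zone:detail',
                    authorized_contexts={'legacy_admin', 'project_admin'},
                    all_contexts=set(contexts))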
diff --git a/nova/tests/unit/policies/test_baremetal_nodes.py b/nova/tests/unit/policies/test_baremetal_nodes.py
index 7a045b2ff0..68f02087c4 100644
--- a/nova/tests/unit/policies/test_baremetal_nodes.py
+++ b/nova/tests/unit/policies/test_baremetal_nodes.py
@@ -43,13 +43,13 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
lambda *_: FAKE_IRONIC_CLIENT)
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to get baremetal nodes.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
def test_index_nodes_policy(self):
rule_name = "os_compute_api:os-baremetal-nodes:list"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -62,7 +62,7 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
mock_get.return_value = node
mock_port.return_value = []
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.show,
self.req, uuids.fake_id)
@@ -95,9 +95,10 @@ class BaremetalNodesScopeTypePolicyTest(BaremetalNodesPolicyTest):
super(BaremetalNodesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to get
- # baremetal nodes.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enabled, only project-scoped admins are
+ # able to get baremetal nodes.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class BNScopeTypeNoLegacyPolicyTest(BaremetalNodesScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_console_output.py b/nova/tests/unit/policies/test_console_output.py
index 0c3ed9ed2d..c1bccf1d55 100644
--- a/nova/tests/unit/policies/test_console_output.py
+++ b/nova/tests/unit/policies/test_console_output.py
@@ -73,8 +73,8 @@ class ConsoleOutputNoLegacyNoScopePolicyTest(ConsoleOutputPolicyTest):
super(ConsoleOutputNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member is able to
# get the server console.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
@@ -92,10 +92,8 @@ class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
super(ConsoleOutputScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ConsoleOutputScopeTypeNoLegacyPolicyTest(
@@ -110,5 +108,5 @@ class ConsoleOutputScopeTypeNoLegacyPolicyTest(
# With scope enabled and no legacy rule, only project admin/member can
# get the server console.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
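Several setUp() bodies above now point at shared, named context sets such as project_m_r_or_admin_with_scope_and_legacy instead of repeating long inline lists. The sketch below shows one plausible way a base test class could define such sets; membership is inferred from the lists this change replaces and may not match the real BasePolicyTest, and the string contexts are stand-ins to keep the example runnable.

class BasePolicyTestSketch:
    def setUp(self):
        # Plain strings stand in for the real RequestContext objects.
        self.legacy_admin_context = 'legacy_admin'
        self.project_admin_context = 'project_admin'
        self.project_member_context = 'project_member'
        self.project_reader_context = 'project_reader'
        self.project_foo_context = 'project_foo'
        # Reusable sets, so each test class names the policy intent once
        # instead of restating the same context lists.
        self.project_m_r_or_admin_with_scope_and_legacy = {
            self.legacy_admin_context, self.project_admin_context,
            self.project_member_context, self.project_reader_context,
            self.project_foo_context}
        self.project_member_or_admin_with_scope_no_legacy = {
            self.project_admin_context, self.project_member_context}
        self.project_reader_or_admin_with_scope_no_legacy = (
            self.project_member_or_admin_with_scope_no_legacy
            | {self.project_reader_context})


sketch = BasePolicyTestSketch()
sketch.setUp()
assert sketch.project_reader_or_admin_with_scope_no_legacy >= {
    'project_reader', 'project_admin'}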
diff --git a/nova/tests/unit/policies/test_create_backup.py b/nova/tests/unit/policies/test_create_backup.py
index 83762e2214..b54ed366df 100644
--- a/nova/tests/unit/policies/test_create_backup.py
+++ b/nova/tests/unit/policies/test_create_backup.py
@@ -81,8 +81,8 @@ class CreateBackupNoLegacyNoScopePolicyTest(CreateBackupPolicyTest):
super(CreateBackupNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to create the server backup.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
@@ -100,10 +100,8 @@ class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
super(CreateBackupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are not allowed to create the server backup.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
@@ -116,5 +114,5 @@ class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
super(CreateBackupScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to create the server backup.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_deferred_delete.py b/nova/tests/unit/policies/test_deferred_delete.py
index faa26e7b15..08bb0213f4 100644
--- a/nova/tests/unit/policies/test_deferred_delete.py
+++ b/nova/tests/unit/policies/test_deferred_delete.py
@@ -105,16 +105,16 @@ class DeferredDeleteNoLegacyNoScopePolicyTest(DeferredDeletePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(DeferredDeleteNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member is able to force
# delete or restore server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
@@ -132,10 +132,8 @@ class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
super(DeferredDeleteScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class DeferredDeleteScopeTypeNoLegacyPolicyTest(
@@ -146,14 +144,14 @@ class DeferredDeleteScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(DeferredDeleteScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled and no legacy rule, only project admin/member is
# able to force delete or restore the server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index 491b17779c..b9e4c29dba 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -103,7 +103,7 @@ class EvacuatePolicyTest(base.BasePolicyTest):
evacuate_mock.assert_called_once_with(
self.user_req.environ['nova.context'],
mock.ANY, 'my-host', False,
- 'MyNewPass', None)
+ 'MyNewPass', None, None)
class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
@@ -114,12 +114,6 @@ class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(EvacuateNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule and scope disable, only project admin
- # will be able to evacuate server.
- self.project_action_authorized_contexts = [self.project_admin_context]
-
class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
"""Test Evacuate APIs policies with system scope enabled.
@@ -146,10 +140,3 @@ class EvacuateScopeTypeNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
and no more deprecated rules which means scope + new defaults.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(EvacuateScopeTypeNoLegacyPolicyTest, self).setUp()
- # This is how our RBAC will looks like. With no legacy rule
- # and scope enable, only project admin is able to evacuate
- # server.
- self.project_action_authorized_contexts = [self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_extensions.py b/nova/tests/unit/policies/test_extensions.py
index 7865ececba..d2e3c6adde 100644
--- a/nova/tests/unit/policies/test_extensions.py
+++ b/nova/tests/unit/policies/test_extensions.py
@@ -71,6 +71,16 @@ class ExtensionsScopeTypePolicyTest(ExtensionsPolicyTest):
def setUp(self):
super(ExtensionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context]
class ExtensionsNoLegacyPolicyTest(ExtensionsScopeTypePolicyTest):
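Each scope-type test above flips the same knob via self.flags(enforce_scope=True, group="oslo_policy"). The standalone sketch below shows the equivalent plain oslo.config/oslo.policy usage: a rule registered with scope_types and the [oslo_policy] enforce_scope option overridden. The rule itself is illustrative rather than Nova's real default.

from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([])
enforcer = policy.Enforcer(CONF)
enforcer.register_default(policy.DocumentedRuleDefault(
    name='os_compute_api:extensions',
    check_str='@',           # allow every caller that reaches the rule
    description='List available extensions.',
    operations=[{'method': 'GET', 'path': '/extensions'}],
    # scope_types declares which token scopes the rule is meant for.
    scope_types=['project']))
# Equivalent of self.flags(enforce_scope=True, group="oslo_policy"):
# with this on, a token whose scope is not in scope_types is rejected
# instead of merely logging a warning.
CONF.set_override('enforce_scope', True, group='oslo_policy')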
diff --git a/nova/tests/unit/policies/test_flavor_access.py b/nova/tests/unit/policies/test_flavor_access.py
index 7a3ebf96cb..cfdbbd2470 100644
--- a/nova/tests/unit/policies/test_flavor_access.py
+++ b/nova/tests/unit/policies/test_flavor_access.py
@@ -122,12 +122,11 @@ class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
super(FlavorAccessScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Scope checks remove project users power.
+ # Scope checks remove system users' power.
self.admin_authorized_contexts = [
- self.system_admin_context]
- self.admin_index_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context]
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.all_project_contexts
class FlavorAccessScopeTypeNoLegacyPolicyTest(FlavorAccessScopeTypePolicyTest):
@@ -146,5 +145,9 @@ class FlavorAccessScopeTypeNoLegacyPolicyTest(FlavorAccessScopeTypePolicyTest):
def setUp(self):
super(FlavorAccessScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- self.admin_index_authorized_contexts = [
- self.system_admin_context]
+
+ # New defaults make this admin-only
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.admin_authorized_contexts
diff --git a/nova/tests/unit/policies/test_flavor_extra_specs.py b/nova/tests/unit/policies/test_flavor_extra_specs.py
index fdb93e6a7c..f3c8cacd57 100644
--- a/nova/tests/unit/policies/test_flavor_extra_specs.py
+++ b/nova/tests/unit/policies/test_flavor_extra_specs.py
@@ -57,7 +57,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
# In the base/legacy case, all project and system contexts are
# authorized in the case of things that distinguish between
# scopes, since scope checking is disabled.
- self.all_system_authorized_contexts = (self.all_project_contexts |
+ self.all_project_authorized_contexts = (self.all_project_contexts |
self.all_system_contexts)
# In the base/legacy case, any admin is an admin.
@@ -167,7 +167,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
}
}
authorize_res, unauthorize_res = self.common_policy_auth(
- self.all_system_authorized_contexts,
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._create, req, body=body,
fatal=False)
for resp in authorize_res:
@@ -187,7 +187,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
req = fakes.HTTPRequest.blank('', version='2.61')
authorize_res, unauthorize_res = self.common_policy_auth(
- self.all_system_authorized_contexts,
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._update, req, '1',
body={'flavor': {'description': None}},
fatal=False)
@@ -211,11 +211,13 @@ class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
super(FlavorExtraSpecsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Only system users are authorized for system APIs
- self.all_system_authorized_contexts = self.all_system_contexts
+ # Only project users are authorized
+ self.reduce_set('all_project_authorized', self.all_project_contexts)
+ self.reduce_set('all_authorized', self.all_project_contexts)
- # Only system_admin can do system admin things
- self.admin_authorized_contexts = [self.system_admin_context]
+ # Only admins can do admin things
+ self.admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class FlavorExtraSpecsNoLegacyNoScopeTest(FlavorExtraSpecsPolicyTest):
@@ -235,7 +237,7 @@ class FlavorExtraSpecsNoLegacyNoScopeTest(FlavorExtraSpecsPolicyTest):
self.system_foo_context,
self.project_foo_context,
])
- self.reduce_set('all_system_authorized', everything_but_foo)
+ self.reduce_set('all_project_authorized', everything_but_foo)
self.reduce_set('all_authorized', everything_but_foo)
@@ -252,11 +254,10 @@ class FlavorExtraSpecsNoLegacyPolicyTest(FlavorExtraSpecsScopeTypePolicyTest):
# contexts. With scope checking enabled, project and system
# contexts stay separate.
self.reduce_set(
- 'all_system_authorized',
- self.all_system_contexts - set([self.system_foo_context]))
- everything_but_foo = (
- self.all_project_contexts | self.all_system_contexts) - set([
- self.system_foo_context,
+ 'all_project_authorized',
+ self.all_project_contexts - set([self.project_foo_context]))
+ everything_but_foo_and_system = (
+ self.all_contexts - set([
self.project_foo_context,
- ])
- self.reduce_set('all_authorized', everything_but_foo)
+ ]) - self.all_system_contexts)
+ self.reduce_set('all_authorized', everything_but_foo_and_system)
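The flavor extra-specs tests above narrow their authorized sets with reduce_set() rather than reassigning them. A hedged guess at what such a helper does is intersecting the existing '<name>_contexts' attribute with the allowed set, so a subclass can only shrink what its parent authorized; the sketch below implements that assumption with string stand-ins for contexts.

class ReduceSetSketch:
    def __init__(self):
        self.all_project_contexts = {'legacy_admin', 'project_admin',
                                     'project_member', 'project_reader',
                                     'project_foo'}
        self.all_system_contexts = {'system_admin', 'system_member',
                                    'system_reader', 'system_foo'}
        self.all_contexts = self.all_project_contexts | self.all_system_contexts
        # Parent class: everything is authorized under legacy defaults.
        self.all_project_authorized_contexts = set(self.all_contexts)

    def reduce_set(self, name, allowed):
        # Assumed behaviour: narrow "<name>_contexts" to the allowed subset.
        attr = name + '_contexts'
        setattr(self, attr, getattr(self, attr) & allowed)


t = ReduceSetSketch()
# Scope-type subclass: drop the system contexts, as in the setUp() above.
t.reduce_set('all_project_authorized', t.all_project_contexts)
assert t.all_project_authorized_contexts == t.all_project_contexts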
diff --git a/nova/tests/unit/policies/test_flavor_manage.py b/nova/tests/unit/policies/test_flavor_manage.py
index 0422469a11..0663a689cb 100644
--- a/nova/tests/unit/policies/test_flavor_manage.py
+++ b/nova/tests/unit/policies/test_flavor_manage.py
@@ -105,10 +105,11 @@ class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest):
super(FlavorManageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope enable, only system admin is able to manage
+ # With scope enabled, only project admin is able to manage
# the flavors.
self.admin_authorized_contexts = [
- self.system_admin_context]
+ self.legacy_admin_context,
+ self.project_admin_context]
class FlavorManageScopeTypeNoLegacyPolicyTest(
diff --git a/nova/tests/unit/policies/test_floating_ip_pools.py b/nova/tests/unit/policies/test_floating_ip_pools.py
index 22fca84bbd..551f482bd4 100644
--- a/nova/tests/unit/policies/test_floating_ip_pools.py
+++ b/nova/tests/unit/policies/test_floating_ip_pools.py
@@ -32,15 +32,15 @@ class FloatingIPPoolsPolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
# Check that everyone is able to list FIP pools.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
self.other_project_member_context,
self.system_member_context, self.system_reader_context,
- self.system_foo_context]
- self.everyone_unauthorized_contexts = []
+ self.system_foo_context])
+ self.everyone_unauthorized_contexts = set([])
@mock.patch('nova.network.neutron.API.get_floating_ip_pools')
def test_floating_ip_pools_policy(self, mock_get):
@@ -66,6 +66,10 @@ class FloatingIPPoolsScopeTypePolicyTest(FloatingIPPoolsPolicyTest):
super(FloatingIPPoolsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.everyone_unauthorized_contexts = (
+ self.all_contexts - self.everyone_authorized_contexts)
+
class FloatingIPPoolsNoLegacyPolicyTest(FloatingIPPoolsScopeTypePolicyTest):
"""Test Floating IP Pools APIs policies with system scope enabled,
diff --git a/nova/tests/unit/policies/test_floating_ips.py b/nova/tests/unit/policies/test_floating_ips.py
index 12beca9d56..26c721e9e9 100644
--- a/nova/tests/unit/policies/test_floating_ips.py
+++ b/nova/tests/unit/policies/test_floating_ips.py
@@ -152,24 +152,24 @@ class FloatingIPNoLegacyNoScopePolicyTest(FloatingIPPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(FloatingIPNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove FIP to server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
# With no legacy, other project roles like foo will not be able
# to operate on FIP.
self.member_authorized_contexts = [
@@ -203,10 +203,8 @@ class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
super(FloatingIPScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.member_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
@@ -228,24 +226,24 @@ class FloatingIPScopeTypeNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(FloatingIPScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# is able to add/delete FIP to the server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
# With no legacy and scope enabled, system users and other project
# roles like foo will not be able to operate on FIP.
self.member_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_hosts.py b/nova/tests/unit/policies/test_hosts.py
index a5147dab82..e07c907cf8 100644
--- a/nova/tests/unit/policies/test_hosts.py
+++ b/nova/tests/unit/policies/test_hosts.py
@@ -35,14 +35,14 @@ class HostsPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform host
# operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.HostAPI.service_get_all')
def test_list_hosts_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -53,34 +53,34 @@ class HostsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.HostAPI.instance_get_all_by_host')
def test_show_host_policy(self, mock_get, mock_node, mock_map, mock_set):
rule_name = policies.POLICY_NAME % 'show'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 11111)
def test_update_host_policy(self):
rule_name = policies.POLICY_NAME % 'update'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 11111, body={})
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_reboot_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'reboot'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.reboot,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_shutdown_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'shutdown'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.shutdown,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_startup_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'start'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.startup,
self.req, 11111)
@@ -113,7 +113,8 @@ class HostsScopeTypePolicyTest(HostsPolicyTest):
# With scope checks enabled, only project-scoped admins are able
# to perform host operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class HostsScopeTypeNoLegacyPolicyTest(HostsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_hypervisors.py b/nova/tests/unit/policies/test_hypervisors.py
index fbdf805fce..dd17ebe2fe 100644
--- a/nova/tests/unit/policies/test_hypervisors.py
+++ b/nova/tests/unit/policies/test_hypervisors.py
@@ -39,51 +39,51 @@ class HypervisorsPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform hypervisor
# operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
def test_list_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
def test_list_details_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list-detail'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.detail,
self.req)
def test_show_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'show'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.get_host_uptime')
def test_uptime_hypervisors_policy(self, mock_uptime):
rule_name = hv_policies.BASE_POLICY_NAME % 'uptime'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.uptime,
self.req, 11111)
def test_search_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'search'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.search,
self.req, 11111)
def test_servers_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'servers'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.servers,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.compute_node_statistics')
def test_statistics_hypervisors_policy(self, mock_statistics):
rule_name = hv_policies.BASE_POLICY_NAME % 'statistics'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.statistics,
self.req)
@@ -115,7 +115,8 @@ class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
# With scope checks enabled, only project-scoped admins are able
# to perform hypervisor operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class HypervisorsScopeTypeNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_instance_actions.py b/nova/tests/unit/policies/test_instance_actions.py
index 2225261d5e..1ca9a66c14 100644
--- a/nova/tests/unit/policies/test_instance_actions.py
+++ b/nova/tests/unit/policies/test_instance_actions.py
@@ -140,20 +140,17 @@ class InstanceActionsNoLegacyNoScopePolicyTest(InstanceActionsPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.PROJECT_ADMIN,
+ base_policy.ADMIN,
}
def setUp(self):
super(InstanceActionsNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, legacy admin loose power.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
@@ -231,10 +228,8 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
# With scope enabled, system users are no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
@mock.patch('nova.objects.InstanceActionEventList.get_by_action')
@mock.patch('nova.objects.InstanceAction.get_by_request_id')
@@ -280,27 +275,25 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self.assertNotIn('details', event)
-class InstanceActionsScopeTypeNoLegacyPolicyTest(InstanceActionsPolicyTest):
+class InstanceActionsScopeTypeNoLegacyPolicyTest(
+ InstanceActionsScopeTypePolicyTest):
"""Test os-instance-actions APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.PROJECT_ADMIN,
+ base_policy.ADMIN,
}
def setUp(self):
super(InstanceActionsScopeTypeNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
# With no legacy and scope enabled, only project admin, member,
# and reader will be able to get server actions, and only admin
# can see event details.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
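The 'events' rule above moves from base_policy.PROJECT_ADMIN to base_policy.ADMIN. The difference, roughly, is whether holding the admin role is enough on its own or whether the admin must also belong to the target project. The snippet below contrasts the two with assumed check strings; they are illustrative only, not Nova's exact base policies.

from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([])
enforcer = policy.Enforcer(CONF)
enforcer.register_defaults([
    # ADMIN-style: any admin token passes; scope handling is separate.
    policy.RuleDefault('events-admin', 'role:admin'),
    # PROJECT_ADMIN-style: admin and the target project must match.
    policy.RuleDefault('events-project-admin',
                       'role:admin and project_id:%(project_id)s'),
])

other_project_admin = {'roles': ['admin'], 'project_id': 'other'}
target = {'project_id': 'p1'}
# The ADMIN rule passes for an admin from any project, while the
# PROJECT_ADMIN rule rejects an admin whose project does not match.
assert enforcer.enforce('events-admin', target, other_project_admin)
assert not enforcer.enforce('events-project-admin', target,
                            other_project_admin)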
diff --git a/nova/tests/unit/policies/test_instance_usage_audit_log.py b/nova/tests/unit/policies/test_instance_usage_audit_log.py
index 311a404075..71b0cdd2aa 100644
--- a/nova/tests/unit/policies/test_instance_usage_audit_log.py
+++ b/nova/tests/unit/policies/test_instance_usage_audit_log.py
@@ -85,7 +85,8 @@ class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
# Scope checks remove system users' power.
self.admin_authorized_contexts = [
- self.system_admin_context]
+ self.legacy_admin_context,
+ self.project_admin_context]
class InstanceUsageScopeTypeNoLegacyPolicyTest(
diff --git a/nova/tests/unit/policies/test_keypairs.py b/nova/tests/unit/policies/test_keypairs.py
index b30d5e2455..ee39133b7a 100644
--- a/nova/tests/unit/policies/test_keypairs.py
+++ b/nova/tests/unit/policies/test_keypairs.py
@@ -35,7 +35,7 @@ class KeypairsPolicyTest(base.BasePolicyTest):
# Check that everyone is able to create, delete and get
# their keypairs.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
@@ -43,13 +43,13 @@ class KeypairsPolicyTest(base.BasePolicyTest):
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context,
- ]
+ ])
# Check that admin is able to create, delete and get
# other users' keypairs.
- self.admin_authorized_contexts = [
+ self.admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
+ self.project_admin_context])
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_index_keypairs_policy(self, mock_get):
@@ -152,6 +152,12 @@ class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
super(KeypairsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checking, only project-scoped users are allowed
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+
class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
"""Test Keypairs APIs policies with system scope enabled,
diff --git a/nova/tests/unit/policies/test_limits.py b/nova/tests/unit/policies/test_limits.py
index 1141f148bb..aba647caec 100644
--- a/nova/tests/unit/policies/test_limits.py
+++ b/nova/tests/unit/policies/test_limits.py
@@ -95,7 +95,7 @@ class LimitsNoLegacyNoScopeTest(LimitsPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.PROJECT_ADMIN}
+ base_policy.ADMIN}
def setUp(self):
super(LimitsNoLegacyNoScopeTest, self).setUp()
@@ -141,7 +141,7 @@ class LimitsScopeTypeNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.PROJECT_ADMIN}
+ base_policy.ADMIN}
def setUp(self):
super(LimitsScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_lock_server.py b/nova/tests/unit/policies/test_lock_server.py
index 292821c7d2..31de5cff0c 100644
--- a/nova/tests/unit/policies/test_lock_server.py
+++ b/nova/tests/unit/policies/test_lock_server.py
@@ -139,11 +139,9 @@ class LockServerNoLegacyNoScopePolicyTest(LockServerPolicyTest):
def setUp(self):
super(LockServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
- # able to lock/unlock the server and only project admin can
- # override the unlock.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ # able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
@@ -160,10 +158,8 @@ class LockServerScopeTypePolicyTest(LockServerPolicyTest):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is not allowed to lock/unlock the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -178,9 +174,8 @@ class LockServerScopeTypeNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
super(LockServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to lock/unlock the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
class LockServerOverridePolicyTest(LockServerScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_migrate_server.py b/nova/tests/unit/policies/test_migrate_server.py
index 43314956e6..0f750770d9 100644
--- a/nova/tests/unit/policies/test_migrate_server.py
+++ b/nova/tests/unit/policies/test_migrate_server.py
@@ -83,11 +83,6 @@ class MigrateServerNoLegacyNoScopeTest(MigrateServerPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
"""Test Migrate Server APIs policies with system scope enabled.
@@ -115,12 +110,6 @@ class MigrateServerScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerScopeTypeNoLegacyPolicyTest, self).setUp()
- # with no legacy rule and scope enable., only project admin is able to
- # migrate the server.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class MigrateServerOverridePolicyTest(
MigrateServerScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_multinic.py b/nova/tests/unit/policies/test_multinic.py
index cd35994f1b..852ff25965 100644
--- a/nova/tests/unit/policies/test_multinic.py
+++ b/nova/tests/unit/policies/test_multinic.py
@@ -83,16 +83,16 @@ class MultinicNoLegacyNoScopePolicyTest(MultinicPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(MultinicNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove the fixed ip.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class MultinicScopeTypePolicyTest(MultinicPolicyTest):
@@ -111,10 +111,8 @@ class MultinicScopeTypePolicyTest(MultinicPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is not allowed to add/remove
# the fixed ip.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
@@ -124,13 +122,13 @@ class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(MultinicScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to add/remove the fixed ip.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_networks.py b/nova/tests/unit/policies/test_networks.py
index 25011859e3..9c3e0b735a 100644
--- a/nova/tests/unit/policies/test_networks.py
+++ b/nova/tests/unit/policies/test_networks.py
@@ -73,9 +73,9 @@ class NetworksNoLegacyNoScopePolicyTest(NetworksPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(NetworksNoLegacyNoScopePolicyTest, self).setUp()
@@ -120,9 +120,9 @@ class NetworksScopeTypeNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(NetworksScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_pause_server.py b/nova/tests/unit/policies/test_pause_server.py
index aa27a7c701..86a3e616dd 100644
--- a/nova/tests/unit/policies/test_pause_server.py
+++ b/nova/tests/unit/policies/test_pause_server.py
@@ -109,8 +109,8 @@ class PauseServerNoLegacyNoScopePolicyTest(PauseServerPolicyTest):
super(PauseServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
@@ -127,10 +127,8 @@ class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
super(PauseServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is not allowed to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
@@ -143,5 +141,5 @@ class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
super(PauseServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_quota_class_sets.py b/nova/tests/unit/policies/test_quota_class_sets.py
index d008092a95..09b90d5ebc 100644
--- a/nova/tests/unit/policies/test_quota_class_sets.py
+++ b/nova/tests/unit/policies/test_quota_class_sets.py
@@ -34,7 +34,7 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to get and update
# quota classes.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@@ -46,7 +46,7 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
'ram': 51200, 'floating_ips': -1,
'fixed_ips': -1, 'instances': 10,
'injected_files': 5, 'cores': 20}}
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.update,
self.req, 'test_class',
@@ -55,7 +55,7 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.quota.QUOTAS.get_class_quotas')
def test_show_quota_class_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.show,
self.req, 'test_class')
@@ -86,9 +86,10 @@ class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
super(QuotaClassSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to update
- # and get quota class.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enabled, only project admins are able to
+ # update and get quota class.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class QuotaClassScopeTypeNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_quota_sets.py b/nova/tests/unit/policies/test_quota_sets.py
index 64932b7fa4..3ff8cd1c02 100644
--- a/nova/tests/unit/policies/test_quota_sets.py
+++ b/nova/tests/unit/policies/test_quota_sets.py
@@ -36,27 +36,27 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
# With legacy rule, all admins are able to update or revert their quota
# to default, or get another project's quota.
- self.project_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
+ self.project_admin_context])
# With legacy rule, everyone is able to get their own quota.
- self.project_reader_authorized_contexts = [
+ self.project_reader_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context]
+ self.other_project_reader_context])
# Everyone is able to get the default quota
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context]
+ self.other_project_reader_context])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
@mock.patch('nova.quota.QUOTAS.get_settable_quotas')
@@ -176,16 +176,13 @@ class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
super(QuotaSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope enable, system users will be disallowed.
- self.project_admin_authorized_contexts = [
+ # With scope enabled, system users will be disallowed.
+ self.reduce_set('project_admin_authorized', set([
self.legacy_admin_context,
- self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context]
+ self.project_admin_context]))
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts)
+ self.everyone_authorized_contexts = self.all_project_contexts
class QuotaSetsScopeTypeNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
@@ -197,6 +194,8 @@ class QuotaSetsScopeTypeNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
def setUp(self):
super(QuotaSetsScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
+ # With scope enabled and no legacy, system and
+ # non-reader/member users are disallowed.
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts -
+ set([self.project_foo_context]))
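The no-legacy quota test above derives its reader set as "all project contexts minus the foo context". A tiny worked example of that set arithmetic, with hypothetical string stand-ins for the contexts:

all_project_contexts = {'legacy_admin', 'project_admin', 'project_member',
                        'project_reader', 'project_foo'}
# Under scope + new defaults, the "foo" context (a custom role without the
# member/reader role) is dropped; everyone else in the project keeps read
# access to their own quota.
project_reader_authorized = all_project_contexts - {'project_foo'}
assert 'project_foo' not in project_reader_authorized
assert {'project_reader', 'project_admin'} <= project_reader_authorized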
diff --git a/nova/tests/unit/policies/test_remote_consoles.py b/nova/tests/unit/policies/test_remote_consoles.py
index a01efd8e42..a441d1c550 100644
--- a/nova/tests/unit/policies/test_remote_consoles.py
+++ b/nova/tests/unit/policies/test_remote_consoles.py
@@ -79,8 +79,8 @@ class RemoteConsolesNoLegacyNoScopePolicyTest(RemoteConsolesPolicyTest):
super(RemoteConsolesNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to get server remote consoles.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
@@ -98,10 +98,8 @@ class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is not allowed to get the server
# remote console.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class RemoteConsolesScopeTypeNoLegacyPolicyTest(
@@ -116,5 +114,5 @@ class RemoteConsolesScopeTypeNoLegacyPolicyTest(
super(RemoteConsolesScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to get server remote console.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_rescue.py b/nova/tests/unit/policies/test_rescue.py
index a8e41c8631..120809877c 100644
--- a/nova/tests/unit/policies/test_rescue.py
+++ b/nova/tests/unit/policies/test_rescue.py
@@ -108,16 +108,16 @@ class RescueServerNoLegacyNoScopePolicyTest(RescueServerPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(RescueServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to rescue/unrescue the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
@@ -135,10 +135,8 @@ class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system admin is not allowed to rescue/unrescue the
# server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
@@ -149,13 +147,13 @@ class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(RescueServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to rescue/unrescue the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_security_groups.py b/nova/tests/unit/policies/test_security_groups.py
index b7afac26cb..a9d2f484ba 100644
--- a/nova/tests/unit/policies/test_security_groups.py
+++ b/nova/tests/unit/policies/test_security_groups.py
@@ -104,22 +104,20 @@ class ServerSecurityGroupsNoLegacyNoScopePolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerSecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove SG to the server, and reader to get SG.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SecurityGroupsPolicyTest(base.BasePolicyTest):
@@ -243,19 +241,19 @@ class SecurityGroupsNoLegacyNoScopePolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
@@ -321,15 +319,10 @@ class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
super(ServerSecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context
- ]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
@@ -340,23 +333,21 @@ class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerSecurityGroupsScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enabled and no legacy rule, only project admin/member
# will be able to add/remove the SG to their server, and reader
# will be able to get the SG of the server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
@@ -366,19 +357,19 @@ class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyPolicyTest, self).setUp()
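
The hunks above repeatedly replace hand-built context lists with shared attributes such as project_member_or_admin_with_no_scope_no_legacy. A minimal sketch of how such shared lists could be composed in the common policy test base (the construction and exact membership below are assumptions for illustration; the real definitions live in nova/tests/unit/policies/base.py):

# Illustrative sketch only: attribute names mirror the tests above, but the
# construction and membership here are assumptions, not BasePolicyTest code.
class _Ctx:
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name


class SharedContextLists:
    def __init__(self):
        self.legacy_admin_context = _Ctx('legacy_admin')
        self.project_admin_context = _Ctx('project_admin')
        self.project_member_context = _Ctx('project_member')
        self.project_reader_context = _Ctx('project_reader')
        self.project_foo_context = _Ctx('project_foo')

        # No scope checks, deprecated rules removed: admin and member keep
        # access (legacy admin is assumed to stay in, matching the "Legacy
        # admin will have access" note added in test_servers.py later in
        # this change), and the reader list adds the project reader on top.
        self.project_member_or_admin_with_no_scope_no_legacy = [
            self.legacy_admin_context, self.project_admin_context,
            self.project_member_context]
        self.project_reader_or_admin_with_no_scope_no_legacy = (
            self.project_member_or_admin_with_no_scope_no_legacy
            + [self.project_reader_context])

        # Scope checks on, deprecated rules still present: system users are
        # dropped but any project-scoped role is still allowed through.
        self.project_m_r_or_admin_with_scope_and_legacy = [
            self.legacy_admin_context, self.project_admin_context,
            self.project_member_context, self.project_reader_context,
            self.project_foo_context]

        # Scope checks on and deprecated rules removed: the strictest case,
        # again with the reader list derived from the member list.
        self.project_member_or_admin_with_scope_no_legacy = [
            self.legacy_admin_context, self.project_admin_context,
            self.project_member_context]
        self.project_reader_or_admin_with_scope_no_legacy = (
            self.project_member_or_admin_with_scope_no_legacy
            + [self.project_reader_context])
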
diff --git a/nova/tests/unit/policies/test_server_diagnostics.py b/nova/tests/unit/policies/test_server_diagnostics.py
index 932f5e2033..4a4b192baa 100644
--- a/nova/tests/unit/policies/test_server_diagnostics.py
+++ b/nova/tests/unit/policies/test_server_diagnostics.py
@@ -66,11 +66,6 @@ class ServerDiagnosticsNoLegacyNoScopeTest(ServerDiagnosticsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
"""Test Server Diagnostics APIs policies with system scope enabled.
@@ -98,12 +93,6 @@ class ServerDiagnosticsScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsScopeTypeNoLegacyPolicyTest, self).setUp()
- # with no legacy rule and scope enable., only project admin is able to
- # get server diagnostics.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class ServerDiagnosticsOverridePolicyTest(
ServerDiagnosticsScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_server_groups.py b/nova/tests/unit/policies/test_server_groups.py
index f93855b175..b0df7ccb89 100644
--- a/nova/tests/unit/policies/test_server_groups.py
+++ b/nova/tests/unit/policies/test_server_groups.py
@@ -163,12 +163,10 @@ class ServerGroupNoLegacyNoScopePolicyTest(ServerGroupPolicyTest):
super(ServerGroupNoLegacyNoScopePolicyTest, self).setUp()
        # With no legacy, only a project admin or member will be able to
        # delete the SG, and a reader will also be able to get the SG.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
-
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
        # Even with no legacy rule, a legacy admin is allowed to create an SG
        # using the requesting context's project_id. The same applies to listing SGs.
@@ -205,16 +203,10 @@ class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
        # With scope enabled, system users are disallowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
@@ -244,17 +236,16 @@ class ServerGroupScopeTypeNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
def setUp(self):
super(ServerGroupScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context,
self.other_project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_server_ips.py b/nova/tests/unit/policies/test_server_ips.py
index f0ce600705..b837d2d0e2 100644
--- a/nova/tests/unit/policies/test_server_ips.py
+++ b/nova/tests/unit/policies/test_server_ips.py
@@ -84,10 +84,8 @@ class ServerIpsNoLegacyNoScopePolicyTest(ServerIpsPolicyTest):
super(ServerIpsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, only project admin, member, and reader will be able
# to get their server IP addresses.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
@@ -105,11 +103,8 @@ class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users will not be able
# to get the server IP addresses.
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
@@ -120,9 +115,7 @@ class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
def setUp(self):
super(ServerIpsScopeTypeNoLegacyPolicyTest, self).setUp()
- # With no legacy and scope enable, only project admin, member,
+ # With no legacy and scope enable, only admin, member,
# and reader will be able to get their server IP addresses.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_metadata.py b/nova/tests/unit/policies/test_server_metadata.py
index 8b95d05894..cf4fb19e7b 100644
--- a/nova/tests/unit/policies/test_server_metadata.py
+++ b/nova/tests/unit/policies/test_server_metadata.py
@@ -119,11 +119,10 @@ class ServerMetadataNoLegacyNoScopePolicyTest(ServerMetadataPolicyTest):
def setUp(self):
super(ServerMetadataNoLegacyNoScopePolicyTest, self).setUp()
        # With no legacy rule, the legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
@@ -140,12 +139,10 @@ class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
super(ServerMetadataScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
        # With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerMetadataScopeTypeNoLegacyPolicyTest(
@@ -160,8 +157,7 @@ class ServerMetadataScopeTypeNoLegacyPolicyTest(
super(ServerMetadataScopeTypeNoLegacyPolicyTest, self).setUp()
        # With no legacy and scope enabled, only a project admin, member,
        # or reader will be allowed to operate on server metadata.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_migrations.py b/nova/tests/unit/policies/test_server_migrations.py
index bf69166b53..b17d4ded1d 100644
--- a/nova/tests/unit/policies/test_server_migrations.py
+++ b/nova/tests/unit/policies/test_server_migrations.py
@@ -93,11 +93,6 @@ class ServerMigrationsNoLegacyNoScopeTest(ServerMigrationsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
"""Test Server Migrations APIs policies with system scope enabled.
@@ -124,12 +119,6 @@ class ServerMigrationsScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsScopeTypeNoLegacyPolicyTest, self).setUp()
- # Check that admin is able to perform operations
- # for server migrations.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class ServerMigrationsOverridePolicyTest(
ServerMigrationsScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_server_password.py b/nova/tests/unit/policies/test_server_password.py
index 48f2046693..b163c6c562 100644
--- a/nova/tests/unit/policies/test_server_password.py
+++ b/nova/tests/unit/policies/test_server_password.py
@@ -80,18 +80,17 @@ class ServerPasswordNoLegacyNoScopePolicyTest(ServerPasswordPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerPasswordNoLegacyNoScopePolicyTest, self).setUp()
        # With no legacy rule, the legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
@@ -108,12 +107,10 @@ class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
super(ServerPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
        # With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerPasswordScopeTypeNoLegacyPolicyTest(
@@ -124,16 +121,15 @@ class ServerPasswordScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerPasswordScopeTypeNoLegacyPolicyTest, self).setUp()
        # With no legacy and scope enabled, only a project admin, member,
        # or reader will be allowed to operate on the server password.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_tags.py b/nova/tests/unit/policies/test_server_tags.py
index 1d905e2b3d..412177408c 100644
--- a/nova/tests/unit/policies/test_server_tags.py
+++ b/nova/tests/unit/policies/test_server_tags.py
@@ -132,11 +132,10 @@ class ServerTagsNoLegacyNoScopePolicyTest(ServerTagsPolicyTest):
def setUp(self):
super(ServerTagsNoLegacyNoScopePolicyTest, self).setUp()
        # With no legacy rule, the legacy admin loses power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
@@ -153,12 +152,10 @@ class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
super(ServerTagsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
        # With scope enabled, system users are no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
@@ -172,8 +169,7 @@ class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
super(ServerTagsScopeTypeNoLegacyPolicyTest, self).setUp()
        # With no legacy and scope enabled, only a project admin, member,
        # or reader will be allowed to operate on server tags.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_topology.py b/nova/tests/unit/policies/test_server_topology.py
index 8624c3e7e7..e2f81dfaad 100644
--- a/nova/tests/unit/policies/test_server_topology.py
+++ b/nova/tests/unit/policies/test_server_topology.py
@@ -98,11 +98,8 @@ class ServerTopologyNoLegacyNoScopePolicyTest(ServerTopologyPolicyTest):
def setUp(self):
super(ServerTopologyNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, legacy admin loose power.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
@@ -121,10 +118,8 @@ class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
        # With scope enabled, system users are no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerTopologyScopeTypeNoLegacyPolicyTest(
@@ -138,9 +133,6 @@ class ServerTopologyScopeTypeNoLegacyPolicyTest(
def setUp(self):
super(ServerTopologyScopeTypeNoLegacyPolicyTest, self).setUp()
        # With no legacy and scope enabled, only a project admin, member,
- # and reader will be able to get server topology and only admin
- # with host info.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ # and reader will be able to get server topology.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
index e089e5245d..eee1e4ba51 100644
--- a/nova/tests/unit/policies/test_servers.py
+++ b/nova/tests/unit/policies/test_servers.py
@@ -1229,10 +1229,9 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API._allow_resize_to_same_host')
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(
- self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net
+ self, mock_resize, mock_save, mock_rs, mock_allow, m_net
):
# 'migrate' policy is checked before 'resize:cross_cell' so
@@ -1262,7 +1261,7 @@ class ServersPolicyTest(base.BasePolicyTest):
)
return inst
- mock_get.side_effect = fake_get
+ self.mock_get.side_effect = fake_get
def fake_validate(context, instance,
host_name, allow_cross_cell_resize):
@@ -1325,7 +1324,7 @@ class ServersNoLegacyNoScopeTest(ServersPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -1333,23 +1332,14 @@ class ServersNoLegacyNoScopeTest(ServersPolicyTest):
# Disabling legacy rule support means that we no longer allow
# random roles on our project to take action on our
- # resources. We also do not allow admin on other projects
- # (i.e. legacy_admin), nor system (because it's admin on no
- # project).
- self.reduce_set('project_action_authorized', set([
- self.project_admin_context, self.project_member_context,
- ]))
-
- self.reduce_set('project_admin_authorized', set([
- self.project_admin_context
- ]))
+ # resources. Legacy admin will have access.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
# The only additional role that can read our resources is our
# own project_reader.
self.project_reader_authorized_contexts = (
- self.project_action_authorized_contexts |
- set([self.project_reader_context])
- )
+ self.project_reader_or_admin_with_no_scope_no_legacy)
# Disabling legacy support means random roles lose power to
# see everything in their project.
@@ -1439,7 +1429,7 @@ class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -1449,15 +1439,8 @@ class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
# powerful on our project. Also, we drop the "any role on the
# project means you can do stuff" behavior, so project_reader
# and project_foo lose power.
- self.reduce_set('project_action_authorized', set([
- self.project_admin_context,
- self.project_member_context,
- ]))
-
- # With no legacy rule and scope checks enable, only project
- # admin can do admin things on project resource.
- self.reduce_set('project_admin_authorized',
- set([self.project_admin_context]))
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
# Only project_reader has additional read access to our
# project resources.
diff --git a/nova/tests/unit/policies/test_services.py b/nova/tests/unit/policies/test_services.py
index 5a77b559ba..72465eb748 100644
--- a/nova/tests/unit/policies/test_services.py
+++ b/nova/tests/unit/policies/test_services.py
@@ -35,21 +35,21 @@ class ServicesPolicyTest(base.BasePolicyTest):
        # With the legacy rules enabled and scope checks disabled by default,
        # system admin, legacy admin, and project admin will be able to
        # perform Services operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
def test_delete_service_policy(self):
rule_name = "os_compute_api:os-services:delete"
with mock.patch('nova.compute.api.HostAPI.service_get_by_id'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.delete,
self.req, 1)
def test_index_service_policy(self):
rule_name = "os_compute_api:os-services:list"
with mock.patch('nova.compute.api.HostAPI.service_get_all'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -58,7 +58,7 @@ class ServicesPolicyTest(base.BasePolicyTest):
body = {'host': 'host1', 'binary': 'nova-compute'}
update = 'nova.compute.api.HostAPI.service_update_by_host_and_binary'
with mock.patch(update):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 'enable', body=body)
@@ -69,7 +69,7 @@ class ServicesPolicyTest(base.BasePolicyTest):
service = self.start_service(
'compute', 'fake-compute-host').service_ref
with mock.patch('nova.compute.api.HostAPI.service_update'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, service.uuid,
body={'status': 'enabled'})
@@ -107,7 +107,8 @@ class ServicesScopeTypePolicyTest(ServicesPolicyTest):
        # With scope checks enabled, legacy admin and project admin are able
        # to perform Services operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class ServicesScopeTypeNoLegacyPolicyTest(ServicesScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_shelve.py b/nova/tests/unit/policies/test_shelve.py
index 87bff30178..052f844c3d 100644
--- a/nova/tests/unit/policies/test_shelve.py
+++ b/nova/tests/unit/policies/test_shelve.py
@@ -122,9 +122,8 @@ class ShelveServerNoLegacyNoScopePolicyTest(ShelveServerPolicyTest):
# With no legacy rule, only project admin or member will be
# able to shelve/unshelve the server and only project admin can
# shelve offload the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
@@ -142,10 +141,8 @@ class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
        # Enabling scope will not allow the system admin to shelve/unshelve
        # the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -160,6 +157,5 @@ class ShelveServerScopeTypeNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
super(ShelveServerScopeTypeNoLegacyPolicyTest, self).setUp()
        # With scope enabled and no legacy rule, only a project admin/member
        # will be able to shelve/unshelve the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_simple_tenant_usage.py b/nova/tests/unit/policies/test_simple_tenant_usage.py
index 1dbd0715d1..d6aa7af901 100644
--- a/nova/tests/unit/policies/test_simple_tenant_usage.py
+++ b/nova/tests/unit/policies/test_simple_tenant_usage.py
@@ -70,10 +70,8 @@ class SimpleTenantUsageNoLegacyNoScopePolicyTest(SimpleTenantUsagePolicyTest):
super(SimpleTenantUsageNoLegacyNoScopePolicyTest, self).setUp()
        # With no legacy, other project roles like foo will not be able
        # to get tenant usage.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
@@ -92,11 +90,8 @@ class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
        # With scope enabled, system users are no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
@@ -109,7 +104,5 @@ class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
def setUp(self):
super(SimpleTenantUsageScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_reader_authorized_contexts = [
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_suspend_server.py b/nova/tests/unit/policies/test_suspend_server.py
index 729f13b4b3..7d3cde2799 100644
--- a/nova/tests/unit/policies/test_suspend_server.py
+++ b/nova/tests/unit/policies/test_suspend_server.py
@@ -107,8 +107,8 @@ class SuspendServerNoLegacyNoScopePolicyTest(SuspendServerPolicyTest):
super(SuspendServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
@@ -125,10 +125,8 @@ class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
super(SuspendServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
        # Enabling scope will not allow system admin to suspend/resume server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
@@ -143,5 +141,5 @@ class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
super(SuspendServerScopeTypeNoLegacyTest, self).setUp()
        # With scope enabled and no legacy rule, only a project admin/member
        # will be able to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_tenant_networks.py b/nova/tests/unit/policies/test_tenant_networks.py
index dedcc3cfa9..a5bc614902 100644
--- a/nova/tests/unit/policies/test_tenant_networks.py
+++ b/nova/tests/unit/policies/test_tenant_networks.py
@@ -72,9 +72,9 @@ class TenantNetworksNoLegacyNoScopePolicyTest(TenantNetworksPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(TenantNetworksNoLegacyNoScopePolicyTest, self).setUp()
@@ -120,9 +120,9 @@ class TenantNetworksScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(TenantNetworksScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_volumes.py b/nova/tests/unit/policies/test_volumes.py
index 53985e7ab1..896881c03f 100644
--- a/nova/tests/unit/policies/test_volumes.py
+++ b/nova/tests/unit/policies/test_volumes.py
@@ -215,14 +215,12 @@ class VolumeAttachNoLegacyNoScopePolicyTest(VolumeAttachPolicyTest):
def setUp(self):
super(VolumeAttachNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, only project admin, member, or reader will be
+ # With no legacy rule, only admin, member, or reader will be
        # able to perform volume attachment operations within their own project.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
-
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
@@ -242,15 +240,10 @@ class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
        # Enabling scope will not allow the system admin to perform
        # volume attachments.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
-
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -268,11 +261,10 @@ class VolumeAttachScopeTypeNoLegacyPolicyTest(VolumeAttachScopeTypePolicyTest):
        # With scope enabled and no legacy rule, system users are not
        # allowed; a project admin/member/reader will be able to perform
        # volume attachment operations within their own project.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class VolumesPolicyTest(base.BasePolicyTest):
@@ -403,25 +395,25 @@ class VolumesNoLegacyNoScopePolicyTest(VolumesPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
v_policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -482,25 +474,25 @@ class VolumesScopeTypeNoLegacyPolicyTest(VolumesScopeTypePolicyTest):
rules_without_deprecation = {
v_policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 9a1766927c..40ebac9af9 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -11,6 +11,7 @@
# under the License.
import copy
+import ddt
import time
from unittest import mock
from urllib import parse
@@ -157,6 +158,60 @@ class SafeConnectedTestCase(test.NoDBTestCase):
self.assertTrue(req.called)
+@ddt.ddt
+class TestSingleton(test.NoDBTestCase):
+ def test_singleton(self):
+ # Make sure we start with a clean slate
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Make sure the first call creates the singleton, sets it
+ # globally, and returns it
+ client = report.report_client_singleton()
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure that a subsequent call returns the same thing
+ # again and that the global is unchanged
+ self.assertEqual(client, report.report_client_singleton())
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ ks_exc.DiscoveryFailure,
+ ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ test.TestingException)
+ def test_errors(self, exc):
+ self._test_error(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_error(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ self.assertRaises(exc, report.report_client_singleton)
+ mock_log.error.assert_called_once()
+
+ def test_error_then_success(self):
+ # Simulate an error
+ self._test_error(ks_exc.ConnectFailure)
+
+ # Ensure we did not set the global client
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Call again, with no error
+ client = report.report_client_singleton()
+
+ # Make sure we got a client and that it was set as the global
+ # one
+ self.assertIsNotNone(client)
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure we keep getting the same one
+ client2 = report.report_client_singleton()
+ self.assertEqual(client, client2)
+
+
class TestConstructor(test.NoDBTestCase):
def setUp(self):
super(TestConstructor, self).setUp()
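
The new TestSingleton cases above describe a module-level cache on the placement report client: the first successful call to report_client_singleton() stores the client in report.PLACEMENTCLIENT, later calls return the same object, and a construction failure logs an error and leaves the global unset so a later call can retry. A rough sketch of that pattern follows; the lock and the broad exception handling are simplifications assumed here, and the real code lives in nova/scheduler/client/report.py.

# Sketch of the caching pattern the TestSingleton cases exercise. The lock
# and the broad `except Exception` are assumptions made for brevity; the
# real implementation catches specific keystoneauth errors.
import logging
import threading

LOG = logging.getLogger(__name__)

PLACEMENTCLIENT = None
_SINGLETON_LOCK = threading.Lock()


class FakeReportClient:
    """Stand-in for the real SchedulerReportClient."""


def report_client_singleton(client_cls=FakeReportClient):
    """Return the global placement client, creating it on first use."""
    global PLACEMENTCLIENT
    if PLACEMENTCLIENT is None:
        with _SINGLETON_LOCK:
            if PLACEMENTCLIENT is None:
                try:
                    PLACEMENTCLIENT = client_cls()
                except Exception:
                    # Leave the global unset so a later call can retry,
                    # matching test_error_then_success() above.
                    LOG.error('Failed to create placement client')
                    raise
    return PLACEMENTCLIENT
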
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
index 0ebe95d5e4..ba9073e0df 100644
--- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -11,6 +11,7 @@
# under the License.
import itertools
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -53,7 +54,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
@@ -132,7 +135,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
- 'ram_allocation_ratio': 1.3})
+ 'ram_allocation_ratio': 1.3,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
@@ -180,7 +185,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': numa_topology,
'pci_stats': None,
'cpu_allocation_ratio': 1,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
assertion = self.assertTrue if passes else self.assertFalse
# test combinations of image properties and extra specs
@@ -237,7 +244,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
@@ -287,7 +296,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': host_topology,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
def test_numa_topology_filter_pass_networks(self):
host = self._get_fake_host_state_with_networks()
@@ -329,3 +340,79 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
network_metadata=network_metadata)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filters_candidates(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 3 candidates for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+ # and that from those candidates only the second matches the numa logic
+ mock_numa_fit.side_effect = [False, True, False]
+
+ # run the filter and expect that the host passes as it has at least
+ # one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ # also assert that the filter checked all three candidates
+ self.assertEqual(3, len(mock_numa_fit.mock_calls))
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filter_fails_if_no_matching_candidate_left(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 1 candidate for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+ # simulate that the only candidate we have does not match
+ mock_numa_fit.side_effect = [False]
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(1, len(mock_numa_fit.mock_calls))
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
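
The two candidate-aware cases above pin down how the filter is expected to consume placement's allocation candidates: it tries the NUMA fit once per candidate using that candidate's provider mappings, narrows host.allocation_candidates to the candidates that fit, and passes the host only if at least one survives (the PCI passthrough filter tests that follow exercise the same shape against support_requests()). A simplified sketch of that loop, with the fitting function injected so the snippet stands alone; the real filter also threads limits and PCI data into numa_fit_instance_to_host().

# Simplified sketch of the per-candidate loop the tests above describe.
# `numa_fit` stands in for nova.virt.hardware.numa_fit_instance_to_host,
# which (per the host_manager test later in this change) accepts a
# provider_mapping keyword.
def host_passes_candidates(host_state, instance_topology, numa_fit):
    good_candidates = []
    for candidate in host_state.allocation_candidates:
        # Each candidate carries the provider mappings placement proposed,
        # e.g. {"<request_id>-0": ["candidate_rp_2"]}.
        if numa_fit(
            host_state.numa_topology,
            instance_topology,
            provider_mapping=candidate["mappings"],
        ):
            good_candidates.append(candidate)

    # The candidate list is narrowed in place, which is what the
    # len(host.allocation_candidates) assertions above check.
    host_state.allocation_candidates = good_candidates
    return bool(good_candidates)
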
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
index edd9735b34..27d80b884e 100644
--- a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -12,6 +12,8 @@
from unittest import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import objects
from nova.pci import stats
from nova.scheduler.filters import pci_passthrough_filter
@@ -33,11 +35,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_fail(self):
pci_stats_mock = mock.MagicMock()
@@ -47,11 +54,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_no_pci_request(self):
spec_obj = objects.RequestSpec(pci_requests=None)
@@ -82,3 +94,92 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
host = fakes.FakeHostState('host1', 'node1',
attribute_dict={'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ def test_filters_candidates(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that only the second allocation candidate fits
+ pci_stats_mock.support_requests.side_effect = [False, True, False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate the placement returned 3 possible candidates
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it passes the host as there is at
+ # least one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked all three candidates
+ pci_stats_mock.support_requests.assert_has_calls(
+ [
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_2"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_3"]},
+ ),
+ ]
+ )
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ def test_filter_fails_if_no_matching_candidate_left(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that the only candidate we have does not match
+ pci_stats_mock.support_requests.side_effect = [False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate the placement returned 3 possible candidates
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked our candidate
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ )
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index c4445d5578..1a7daa515f 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -1562,10 +1562,14 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
- numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
- fake_numa_topology,
- limits=None, pci_requests=None,
- pci_stats=None)
+ numa_fit_mock.assert_called_once_with(
+ fake_host_numa_topology,
+ fake_numa_topology,
+ limits=None,
+ pci_requests=None,
+ pci_stats=None,
+ provider_mapping=None,
+ )
numa_usage_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
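
The host_manager assertion above, together with the manager test changes that follow (every HostState mock gains allocation_candidates=[] and the sorted-hosts mock now returns whatever hosts it is given), implies that the scheduler attaches placement's per-host allocation candidates to each host state before filtering. A hypothetical sketch of that glue, keyed by the compute node resource provider UUID; the real wiring lives in nova/scheduler/manager.py and is not reproduced here.

# Hypothetical glue for illustration only: attach the allocation requests
# placement returned for each host's resource provider to the host state,
# so filters can narrow them (see the allocation_candidates handling above).
def attach_allocation_candidates(host_states, alloc_reqs_by_rp_uuid):
    for host_state in host_states:
        # host_state.uuid is the compute node's resource provider UUID and
        # therefore keys the allocation requests for this host.
        host_state.allocation_candidates = list(
            alloc_reqs_by_rp_uuid.get(host_state.uuid, []))
    return host_states
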
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 9356292918..e7866069b3 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -26,6 +26,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova.scheduler import utils as scheduler_utils
@@ -396,9 +397,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, aggregates=[])
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -459,20 +467,29 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=group)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
+ instance_group=group,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ limits={},
+ cell_uuid=uuids.cell,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.manager._schedule(ctx, spec_obj,
- instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
- mock.sentinel.provider_summaries)
+ instance_uuids, None, mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -510,14 +527,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance]
@@ -583,11 +610,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
project_id=uuids.project_id,
instance_group=None)
- host_state = mock.Mock(spec=host_manager.HostState,
- host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host=mock.sentinel.host,
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+            allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = False
instance_uuids = [uuids.instance]
@@ -604,7 +636,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
- mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
+ mock_get_hosts.assert_called_once_with(spec_obj, mock.ANY, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.manager.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
@@ -635,18 +667,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, updated='fake')
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ updated="fake",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [
- all_host_states, # first instance: return all the hosts (only one)
- [], # second: act as if no more hosts that meet criteria
- all_host_states, # the final call when creating alternates
- ]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ # first instance: return all the hosts (only one)
+ if c == 0:
+ return hosts
+ # second: act as if no more hosts that meet criteria
+ elif c == 1:
+ return []
+ # the final call when creating alternates
+ elif c == 2:
+ return hosts
+ else:
+ raise StopIteration()
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
@@ -679,20 +734,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -744,20 +823,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -814,17 +917,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=ig, instance_uuid=uuids.instance0)
+ instance_group=ig,
+ instance_uuid=uuids.instance0,
+ requested_resources=[],
+ )
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
- nodename="node1", limits={}, uuid=uuids.cn1,
- cell_uuid=uuids.cell1, instances={}, aggregates=[])
- hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
- nodename="node2", limits={}, uuid=uuids.cn2,
- cell_uuid=uuids.cell2, instances={}, aggregates=[])
+ hs1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host1",
+ nodename="node1",
+ limits={},
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ hs2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host2",
+ nodename="node2",
+ limits={},
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell2,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
@@ -838,13 +960,18 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# _get_sorted_hosts() in the two iterations for each instance in
# num_instances
visited_instances = set([])
+ get_sorted_hosts_called_with_host_states = []
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
- return [hs1, hs2]
- return [hs2, hs1]
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return s
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return reversed(s)
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
@@ -871,10 +998,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
- mock.call(spec_obj, all_host_states, 0),
- mock.call(spec_obj, [hs2, hs1], 1),
+ mock.call(spec_obj, mock.ANY, 0),
+ mock.call(spec_obj, mock.ANY, 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
+ self.assertEqual(
+ all_host_states, get_sorted_hosts_called_with_host_states[0])
+ self.assertEqual(
+ [hs1], get_sorted_hosts_called_with_host_states[1])
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
@@ -1168,14 +1299,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, updated="Not None")
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ updated="Not None",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [all_host_states, []]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return []
+ else:
+ raise StopIteration
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
@@ -1204,7 +1357,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1212,14 +1365,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1270,11 +1423,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
+
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
- mock_sorted.side_effect = [all_host_states,
- list(reversed(all_host_states)),
- all_host_states]
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return list(reversed(all_host_states))
+ elif c == 2:
+ return list(hosts)
+ else:
+ raise StopIteration()
+
+ mock_sorted.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1282,14 +1448,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1323,7 +1489,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
@@ -1331,14 +1497,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1521,3 +1687,506 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager._discover_hosts_in_cells(mock.sentinel.context)
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+
+
+class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
+
+ class ACRecorderFilter(filters.BaseHostFilter):
+        """A filter that records the allocation candidates it sees on each
+        host.
+        """
+
+ def __init__(self):
+ super().__init__()
+ self.seen_candidates = []
+
+ def host_passes(self, host_state, filter_properties):
+ # record what candidate the filter saw for each host
+ self.seen_candidates.append(list(host_state.allocation_candidates))
+ return True
+
+ class DropFirstFilter(filters.BaseHostFilter):
+        """A filter that drops the first candidate on each host and keeps
+        the rest.
+        """
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates.pop(0)
+ return bool(host_state.allocation_candidates)
+
+ @mock.patch.object(
+ host_manager.HostManager, '_init_instance_info', new=mock.Mock())
+ @mock.patch.object(
+ host_manager.HostManager, '_init_aggregates', new=mock.Mock())
+ def setUp(self):
+ super().setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.manager = manager.SchedulerManager()
+ self.manager.host_manager.weighers = []
+ self.request_spec = objects.RequestSpec(
+ ignore_hosts=[],
+ force_hosts=[],
+ force_nodes=[],
+ requested_resources=[],
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_see_allocation_candidates_for_each_host(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # have a single filter configured where we can assert that the filter
+        # sees the allocation_candidates of each host
+ filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [filter]
+
+ instance_uuids = [uuids.inst1]
+
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts with different candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ mock.sentinel.host1_a_c_1,
+ mock.sentinel.host1_a_c_2,
+ ]
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ mock.sentinel.host2_a_c_1,
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ mock.sentinel.allocation_request_version,
+ )
+
+        # we expect that our filter saw the allocation candidate list of
+        # each host, respectively
+ self.assertEqual(
+ [
+ alloc_reqs_by_rp_uuid[uuids.host1],
+ alloc_reqs_by_rp_uuid[uuids.host2],
+ ],
+ filter.seen_candidates,
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_scheduler_selects_filtered_a_c_from_hosts_state(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ """Assert that if a filter removes an allocation candidate from a host
+ then even if that host is selected the removed allocation candidate
+ is not used by the scheduler.
+ """
+
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+        # we have requested one instance to be scheduled so we expect one
+        # set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(
+ "host1-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consecutive_filter_sees_filtered_a_c_list(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # create two filters
+ # 1) DropFirstFilter runs first and drops the first candidate from each
+ # host
+ # 2) ACRecorderFilter runs next and records what candidates it saw
+ recorder_filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [
+ self.DropFirstFilter(),
+ recorder_filter,
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+        # we expect that the second filter saw one host with a single
+        # candidate, as candidate1 was already filtered out by the first
+        # filter
+ self.assertEqual(
+ [["host1-candidate2"]],
+ recorder_filter.seen_candidates
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_removes_all_a_c_host_is_not_selected(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ ]
+ # second with two candidates
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ "host2-candidate1",
+ "host2-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we expect that the first host is not selected as the filter
+ # removed every candidate from the host
+ # also we expect that on the second host only candidate2 could have
+ # been selected
+ # we asked for one instance, so we expect one set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(uuids.host2, selection.compute_node_uuid)
+ self.assertEqual(
+ "host2-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consume_selected_host_sees_updated_request_spec(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # simulate that nothing is filtered out, by not having any filters
+ self.manager.host_manager.enabled_filters = []
+
+ # set up the request spec with a request group to be updated
+ # by the selected candidate
+ self.request_spec.requested_resources = [
+ objects.RequestGroup(
+ requester_id=uuids.group_req1, provider_uuids=[]
+ )
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+        # have a single host with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ # simulate that placement fulfilled the above RequestGroup from
+ # a certain child RP of the host.
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child_rp],
+ }
+ }
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ # make asserts on the request_spec passed to consume
+ def assert_request_spec_updated_with_selected_candidate(
+ selected_host, spec_obj, instance_uuid=None
+ ):
+            # we expect that the scheduler updated the request_spec based
+            # on the selected candidate before calling consume
+ self.assertEqual(
+ [uuids.host1_child_rp],
+ spec_obj.requested_resources[0].provider_uuids,
+ )
+
+ mock_consume.side_effect = (
+ assert_request_spec_updated_with_selected_candidate)
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ mock_consume.assert_called_once()
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_main_selection_with_claimed_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+        """Assert that the first (a.k.a. main) selection returned for an
+        instance always maps to the allocation candidate that was claimed by
+        the scheduler in placement.
+        """
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have one host with 3 candidates each fulfilling a request group
+        # from a different child RP
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ # This is odd but the un-name request group uses "" as the
+                    # This is odd but the unnamed request group uses "" as
+                    # the name of the group.
+ uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
+ }
+ } for i in [1, 2, 3]
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ self.assertEqual(uuids.host1, selection.compute_node_uuid)
+ # we expect that host1_child2 candidate is selected as the
+ # DropFirstFilter will drop host1_child1
+ expected_a_c = {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child2],
+ }
+ }
+ self.assertEqual(
+ expected_a_c,
+ jsonutils.loads(selection.allocation_request),
+ )
+ # and we expect that the same candidate was claimed in placement
+ mock_claim.assert_called_once_with(
+ mock.ANY,
+ self.manager.placement_client,
+ self.request_spec,
+ uuids.inst1,
+ expected_a_c,
+ allocation_request_version="fake-alloc-req-version",
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_alts_with_filtered_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that alternate generation also works based on filtered
+ candidates.
+ """
+
+ class RPFilter(filters.BaseHostFilter):
+ """A filter that only allows candidates with specific RPs"""
+
+ def __init__(self, allowed_rp_uuids):
+ self.allowed_rp_uuids = allowed_rp_uuids
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates = [
+ a_c
+ for a_c in host_state.allocation_candidates
+ if a_c["mappings"][uuids.group_req1][0]
+ in self.allowed_rp_uuids
+ ]
+ return True
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have 3 hosts each with 2 allocation candidates fulfilling a request
+ # group from a different child RP
+ hosts = []
+ for i in [1, 2, 3]:
+ host = host_manager.HostState(f"host{i}", f"node{i}", uuids.cell1)
+ host.uuid = getattr(uuids, f"host{i}")
+ alloc_reqs_by_rp_uuid[host.uuid] = [
+ {
+ "mappings": {
+ "": [host.uuid],
+ uuids.group_req1: [
+ getattr(uuids, f"host{i}_child{j}")
+ ],
+ }
+ }
+ for j in [1, 2]
+ ]
+ hosts.append(host)
+ mock_get_all_host_states.return_value = iter(hosts)
+
+ # configure a filter that only "likes" host1_child2 and host3_child2
+ # RPs. This means host2 is totally out and host1 and host3 only have
+ # one viable candidate
+ self.manager.host_manager.enabled_filters = [
+ RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
+ ]
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+        # we expect a main selection (host1) and a single alternative
+        # (host3); on both selections we expect child2 as the selected
+        # candidate
+ self.assertEqual(2, len(selections))
+ main_selection = selections[0]
+ self.assertEqual(uuids.host1, main_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host1_child2],
+ jsonutils.loads(main_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
+
+ alt_selection = selections[1]
+ self.assertEqual(uuids.host3, alt_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host3_child2],
+ jsonutils.loads(alt_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
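
The new test class above exercises the contract that host filters may inspect and trim host_state.allocation_candidates, and that the scheduler only claims and returns candidates that survive filtering. A minimal standalone filter following that contract, modeled on the in-test RPFilter helper (the class name and the allowed_rp_uuids knob are illustrative assumptions, not part of Nova's API):

from nova.scheduler import filters


class KeepAllowedProvidersFilter(filters.BaseHostFilter):
    """Sketch: keep only candidates whose unnamed request group maps to
    an allowed resource provider; reject the host if none survive.
    """

    def __init__(self, allowed_rp_uuids):
        super().__init__()
        self.allowed_rp_uuids = allowed_rp_uuids

    def host_passes(self, host_state, filter_properties):
        # The scheduler later claims the first surviving candidate.
        host_state.allocation_candidates = [
            a_c for a_c in host_state.allocation_candidates
            if a_c["mappings"][""][0] in self.allowed_rp_uuids
        ]
        return bool(host_state.allocation_candidates)
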
diff --git a/nova/tests/unit/scheduler/test_request_filter.py b/nova/tests/unit/scheduler/test_request_filter.py
index 186482d4a8..77e538006a 100644
--- a/nova/tests/unit/scheduler/test_request_filter.py
+++ b/nova/tests/unit/scheduler/test_request_filter.py
@@ -612,3 +612,90 @@ class TestRequestFilter(test.NoDBTestCase):
mock_get_aggs_network.assert_has_calls([
mock.call(self.context, mock.ANY, mock.ANY, uuids.net1),
mock.call(self.context, mock.ANY, mock.ANY, uuids.net2)])
+
+ def test_ephemeral_encryption_filter_no_encryption(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ # Assert that the filter returns false and doesn't update the reqspec
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_disabled(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps(
+ hw_ephemeral_encryption=False)))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'False'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_no_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'True'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION}, reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_and_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION,
+ ot.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS},
+ reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
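
Read together, the four tests pin down the observable behaviour of the new request filter: do nothing unless ephemeral encryption is requested via the flavor or image, otherwise require the matching placement traits on the root provider. A rough reconstruction of that behaviour, inferred from the assertions rather than copied from nova.scheduler.request_filter (ot is the os-traits module, matching the alias used in the assertions above; the function name is illustrative):

import os_traits as ot


def ephemeral_encryption_filter_sketch(ctxt, request_spec):
    # Not requested, or explicitly disabled: leave the spec untouched.
    flavor_val = request_spec.flavor.extra_specs.get(
        'hw:ephemeral_encryption')
    image_val = request_spec.image.properties.get(
        'hw_ephemeral_encryption')
    enabled = (
        (flavor_val is not None and flavor_val.lower() == 'true') or
        image_val is True)
    if not enabled:
        return False

    # Require hosts that can encrypt ephemeral disks, and optionally the
    # requested format (e.g. COMPUTE_EPHEMERAL_ENCRYPTION_LUKS).
    request_spec.root_required.add(ot.COMPUTE_EPHEMERAL_ENCRYPTION)
    fmt = request_spec.flavor.extra_specs.get(
        'hw:ephemeral_encryption_format')
    if fmt:
        request_spec.root_required.add(
            getattr(ot, 'COMPUTE_EPHEMERAL_ENCRYPTION_%s' % fmt.upper()))
    return True
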
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
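
The module under test reduces to two thin wrappers around files below /sys. A minimal sketch consistent with the assertions above (the real nova/filesystem.py may differ in detail, e.g. logging):

import os

from nova import exception

SYS = '/sys'


def read_sys(path):
    """Read and return the contents of a file under SYS."""
    full_path = os.path.join(SYS, path)
    try:
        with open(full_path, mode='r') as f:
            return f.read()
    except OSError as exc:
        raise exception.FileNotFound(file_path=full_path) from exc


def write_sys(path, data):
    """Write data to a file under SYS; returns None on success."""
    full_path = os.path.join(SYS, path)
    try:
        with open(full_path, mode='w') as f:
            f.write(data)
    except OSError as exc:
        raise exception.FileNotFound(file_path=full_path) from exc
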
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index 10b2a79db4..41cbada99f 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -1043,3 +1043,24 @@ class HackingTestCase(test.NoDBTestCase):
import unittest.mock
"""
self._assert_has_no_errors(code, checks.import_stock_mock)
+
+ def test_check_set_daemon(self):
+ code = """
+ self.setDaemon(True)
+ worker.setDaemon(True)
+ self._event_thread.setDaemon(True)
+ mythread.setDaemon(False)
+ self.thread.setDaemon(1)
+ """
+ errors = [(x + 1, 0, 'N372') for x in range(5)]
+ self._assert_has_errors(
+ code, checks.check_set_daemon, expected_errors=errors)
+
+ code = """
+ self.setDaemon = True
+ worker.setDaemonFlag(True)
+ self._event_thread.resetDaemon(True)
+ self.set.Daemon(True)
+ self.thread.setdaemon(True)
+ """
+ self._assert_has_no_errors(code, checks.check_set_daemon)
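
The offending and non-offending snippets above fully characterise what the check has to match: attribute calls of the form .setDaemon(...) and nothing else. An illustrative regex-based equivalent of that matching logic (the real checks.check_set_daemon follows hacking's plugin signature and may be structured differently):

import re

# threading.Thread.setDaemon() is deprecated; assign .daemon instead.
SET_DAEMON_RE = re.compile(r'\.setDaemon\(')


def find_set_daemon_calls(source):
    """Yield (line_number, column) for each line calling setDaemon()."""
    for lineno, line in enumerate(source.splitlines(), start=1):
        if SET_DAEMON_RE.search(line):
            yield lineno, 0
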
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index e4ae09f91c..752b872381 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -303,10 +303,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
- self.non_admin_context = context.RequestContext('fake', 'fake',
- roles=['member'])
- self.admin_context = context.RequestContext('fake', 'fake', True,
- roles=['admin', 'member'])
+ self.non_admin_context = context.RequestContext(
+ 'fake', 'fake', roles=['member', 'reader'])
+ self.admin_context = context.RequestContext(
+ 'fake', 'fake', True, roles=['admin', 'member', 'reader'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
@@ -387,6 +387,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-hypervisors:search",
"os_compute_api:os-hypervisors:servers",
"os_compute_api:limits:other_project",
+"os_compute_api:os-flavor-access",
)
self.admin_or_owner_rules = (
@@ -440,7 +441,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete:restore",
"os_compute_api:os-deferred-delete:force",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ips:add",
@@ -554,7 +554,8 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show',
'project_admin_api', 'project_member_api',
- 'project_reader_api', 'project_reader_or_admin')
+ 'project_reader_api', 'project_member_or_admin',
+ 'project_reader_or_admin')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules +
self.allow_all_rules +
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index 3fe56013bd..40a914b5f7 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -214,20 +214,20 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client(self, mock_get, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -253,21 +253,21 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client_profiler_enabled(self, mock_client, mock_ser,
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -432,11 +432,11 @@ class TestProfilerRequestContextSerializer(test.NoDBTestCase):
class TestClientRouter(test.NoDBTestCase):
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
@@ -444,7 +444,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
# verify a client was created by ClientRouter
- mock_rpcclient.assert_called_once_with(
+ mock_get.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
@@ -452,11 +452,11 @@ class TestClientRouter(test.NoDBTestCase):
# verify cell client was returned
self.assertEqual(cell_client, client)
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance_untargeted(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance_untargeted(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
@@ -464,7 +464,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
- self.assertFalse(mock_rpcclient.called)
+ self.assertFalse(mock_get.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index 9fb6fa1c40..acc1aeca7f 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -128,7 +128,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
# init_host is called before any service record is created
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host, self.binary)
mock_create.assert_called_once_with()
@@ -186,7 +186,7 @@ class ServiceTestCase(test.NoDBTestCase):
mock_create.side_effect = ex
serv.manager = mock_manager
self.assertRaises(test.TestingException, serv.start)
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(None)
mock_get_by_host_and_binary.assert_has_calls([
mock.call(mock.ANY, self.host, self.binary),
mock.call(mock.ANY, self.host, self.binary)])
@@ -216,7 +216,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host,
self.binary)
@@ -241,7 +241,8 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(
+ mock_svc_get_by_host_and_binary.return_value)
serv.stop()
serv.manager.cleanup_host.assert_called_with()
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 62005de525..135558e145 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -40,6 +40,7 @@ class FakeMount(object):
class APITestCase(test.NoDBTestCase):
+ @mock.patch('nova.virt.disk.vfs.guestfs.VFSGuestFS', new=mock.Mock())
def test_can_resize_need_fs_type_specified(self):
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 439c15683c..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -935,6 +935,48 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected = {
'CUSTOM_IRON_NFV': {
'total': 1,
+ 'reserved': 1,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ mock_nfc.assert_called_once_with(mock.sentinel.nodename)
+ mock_nr.assert_called_once_with(mock_nfc.return_value)
+ mock_res_used.assert_called_once_with(mock_nfc.return_value)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
+ result = self.ptree.data(mock.sentinel.nodename).inventory
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_used', return_value=True)
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable', return_value=False)
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
+ def test_update_provider_tree_with_rc_occupied_workaround(self,
+ mock_nfc, mock_nr, mock_res_unavail, mock_res_used):
+ """Ensure that when a node is used, we report the inventory matching
+ the consumed resources.
+ """
+ self.flags(skip_reserve_in_use_ironic_nodes=True,
+ group="workarounds")
+ mock_nr.return_value = {
+ 'vcpus': 24,
+ 'vcpus_used': 24,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 1024,
+ 'local_gb': 100,
+ 'local_gb_used': 100,
+ 'resource_class': 'iron-nfv',
+ }
+
+ self.driver.update_provider_tree(self.ptree, mock.sentinel.nodename)
+
+ expected = {
+ 'CUSTOM_IRON_NFV': {
+ 'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
@@ -945,7 +987,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).inventory
self.assertEqual(expected, result)
@@ -1016,7 +1058,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(), result)
@@ -1048,7 +1090,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(traits), result)
@@ -2500,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2532,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2540,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
@@ -2598,9 +2658,6 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
# that the thread completes.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
- self.mock_conn = self.useFixture(
- fixtures.MockPatchObject(self.driver, '_ironic_connection')).mock
-
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
def test_rescue(self, mock_sps, mock_looping):
diff --git a/nova/tests/unit/virt/libvirt/cpu/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..b5bcb762f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ # Create a fake instance with two pinned CPUs but only one is on the
+ # dedicated set
+ numa_topology = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(cpu_pinning_raw={'0': '0', '2': '2'}),
+ ])
+ self.fake_inst = objects.Instance(numa_topology=numa_topology)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_online(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, setting core(i).online to True calls set_online(i)
+ mock_online.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_up_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_high_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'performance')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped(self, mock_online):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_up(self.fake_inst)
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped_if_standard_instance(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_up(objects.Instance(numa_topology=None))
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, setting core(i).online to False calls set_offline(i)
+ mock_offline.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, core(i).set_low_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'powersave')
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down(self.fake_inst)
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped_if_standard_instance(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_down(objects.Instance(numa_topology=None))
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_offline.assert_has_calls([mock.call(0), mock.call(1)])
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_all_dedicated_cpus_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+        # All dedicated CPUs are set to the powersave governor
+ mock_set_governor.assert_has_calls([mock.call(0, 'powersave'),
+ mock.call(1, 'powersave')])
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down_all_dedicated_cpus()
+ mock_offline.assert_not_called()
+
+ def test_power_down_all_dedicated_cpus_wrong_config(self):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set=None, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.power_down_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_governor(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ mock_get_governor.return_value = 'performance'
+ mock_get_online.side_effect = (True, False)
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_cpu_state(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ mock_get_online.return_value = True
+ mock_get_governor.side_effect = ('powersave', 'performance')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
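
Taken together, these tests pin down the expected behaviour: do nothing unless [libvirt]cpu_power_management is set and the instance has a NUMA topology with pinned CPUs, touch only cores in [compute]cpu_dedicated_set, and either flip the online state (the default 'cpu_state' strategy) or switch governors ('governor' strategy). A rough reconstruction of power_up along those lines; power_down is symmetric with set_offline/set_low_governor. This is inferred from the assertions, not the actual nova.virt.libvirt.cpu.api code, and the function name is illustrative:

import nova.conf
from nova.virt import hardware
from nova.virt.libvirt.cpu import api

CONF = nova.conf.CONF


def power_up_sketch(instance):
    if not CONF.libvirt.cpu_power_management:
        return
    if instance.numa_topology is None:
        return

    dedicated = hardware.get_cpu_dedicated_set() or set()
    pinned = set()
    for cell in instance.numa_topology.cells:
        pinned |= set(cell.cpu_pinning.values())

    # e.g. pinned={0, 2}, dedicated={0, 1} -> only core 0 is powered up
    for pcpu in pinned & dedicated:
        core = api.Core(pcpu)
        if CONF.libvirt.cpu_power_management_strategy == 'governor':
            core.set_high_governor()   # set_governor(pcpu, 'performance')
        else:
            core.online = True         # set_online(pcpu)
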
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
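For context on what the TestCore/TestCoreHelpers cases above exercise: the following is a minimal sketch of the CPU-core sysfs helpers, reconstructed only from what these tests assert; the module layout, sysfs paths and error handling of the real nova/virt/libvirt/cpu/core.py may differ.

    # Sketch only -- inferred from the tests above, not the real module.
    import os

    from oslo_log import log as logging

    from nova import exception
    from nova import filesystem
    from nova.virt import hardware

    LOG = logging.getLogger(__name__)

    AVAILABLE_PATH = '/sys/devices/system/cpu/present'          # assumed path
    CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'   # assumed path


    def get_available_cores():
        cores = filesystem.read_sys(AVAILABLE_PATH)
        return hardware.parse_cpu_spec(cores) if cores else set()


    def exists(core):
        return core in get_available_cores()


    def gen_cpu_path(core):
        if not exists(core):
            msg = 'Unable to access CPU: %s' % core
            LOG.warning(msg)
            raise ValueError(msg)
        return CPU_PATH_TEMPLATE % {'core': core}


    def get_online(core):
        try:
            online = filesystem.read_sys(
                os.path.join(gen_cpu_path(core), 'online'))
        except exception.FileNotFound:
            # cores without an 'online' file (e.g. CPU0) are always online
            return True
        return online.strip() == '1'


    def set_online(core):
        filesystem.write_sys(
            os.path.join(gen_cpu_path(core), 'online'), data='1')
        return get_online(core)


    def set_offline(core):
        filesystem.write_sys(
            os.path.join(gen_cpu_path(core), 'online'), data='0')
        return not get_online(core)


    def get_governor(core):
        return filesystem.read_sys(
            os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()


    def set_governor(core, governor):
        filesystem.write_sys(
            os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
            data=governor)
        return get_governor(core)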
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index 3da827dce8..5a0dbb40ce 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -74,6 +74,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
+ image = [{'device_type': 'disk', 'boot_index': 0}]
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
@@ -84,6 +85,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_path': 'fake_device'}]
return {'root_device_name': '/dev/vda',
'swap': swap if with_swap else {},
+ 'image': image,
'ephemerals': ephemerals if with_eph else [],
'block_device_mapping':
block_device_mapping if with_bdms else []}
@@ -178,11 +180,16 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
with mock.patch.object(instance_ref, 'get_flavor',
return_value=instance_ref.flavor) as get_flavor:
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Since there was no block_device_info passed to get_disk_mapping we
# expect to get the swap info from the flavor in the instance.
get_flavor.assert_called_once_with()
@@ -202,7 +209,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
- 'root_device_name': '/dev/sda'
+ 'root_device_name': '/dev/sda',
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
@@ -490,9 +498,12 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
- "lxc", "lxc",
- image_meta)
+ block_device_info = {
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "lxc", instance_ref, "lxc", "lxc", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'lxc', 'dev': None,
'type': 'disk', 'boot_index': '1'},
@@ -527,9 +538,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.flavor.swap = 5
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -549,6 +565,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.ephemeral_gb = 0
block_dev_info = {'swap': None, 'root_device_name': u'/dev/vda',
+ 'image': [],
'ephemerals': [],
'block_device_mapping': [{'boot_index': None,
'mount_device': u'/dev/vdb',
@@ -591,8 +608,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Pick the first drive letter on the bus that is available
# as the config drive. Delete the last device hardcode as
@@ -647,8 +670,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {
@@ -697,9 +726,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -718,6 +752,9 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -754,6 +791,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
block_device_info = {
'swap': {'device_name': '/dev/vdb',
'swap_size': 10},
+ 'image': [{'device_type': 'disk',
+ 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
@@ -775,6 +814,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -803,6 +843,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = {}
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': None,
'mount_device': None,
@@ -858,6 +899,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -899,6 +941,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'root_device_name': '/dev/vdf',
'swap': {'device_name': '/dev/vdy',
'swap_size': 10},
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -940,6 +983,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'swap': {'device_name': '/dev/vdb',
'device_type': 'really_lame_type',
'swap_size': 10},
+ 'image': [{'device_name': '/dev/vda',
+ 'device_type': 'disk'}],
'ephemerals': [{'disk_bus': 'no_such_bus',
'device_type': 'yeah_right',
'device_name': '/dev/vdc', 'size': 10}],
@@ -951,6 +996,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
}
expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
'device_type': 'disk', 'swap_size': 10}
+ expected_image = {'device_name': '/dev/vda', 'device_type': 'disk',
+ 'disk_bus': 'virtio'}
expected_ephemeral = {'disk_bus': 'virtio',
'device_type': 'disk',
'device_name': '/dev/vdc', 'size': 10}
@@ -970,6 +1017,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.assertFalse(get_flavor_mock.called)
self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_image, block_device_info['image'][0])
self.assertEqual(expected_ephemeral,
block_device_info['ephemerals'][0])
self.assertEqual(expected_bdm,
@@ -1124,7 +1172,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_type': 'lame_type',
'delete_on_termination': True},
{'disk_bus': 'sata', 'guest_format': None,
- 'device_name': '/dev/sda', 'size': 3}]
+ 'device_name': '/dev/sda', 'size': 3},
+ {'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': '{"json": "options"}'}]
expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
{'dev': 'vdb', 'type': 'disk',
'bus': 'virtio', 'format': 'ext4'},
@@ -1133,7 +1184,11 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'bus': 'scsi', 'boot_index': '1'},
{'dev': 'vdo', 'type': 'disk',
'bus': 'scsi', 'boot_index': '2'},
- {'dev': 'sda', 'type': 'disk', 'bus': 'sata'}]
+ {'dev': 'sda', 'type': 'disk', 'bus': 'sata'},
+ {'dev': 'vda', 'type': 'disk', 'bus': 'virtio',
+ 'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': {'json': 'options'}}]
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
for bdm, expected in zip(bdms, expected):
@@ -1441,6 +1496,15 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'destination_type': 'volume',
'boot_index': -1}))]
+ self.image = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 6, 'instance_uuid': uuids.instance,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'boot_index': 0}))]
+
def tearDown(self):
super(DefaultDeviceNamesTestCase, self).tearDown()
for patcher in self.patchers:
@@ -1450,7 +1514,7 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'nova.virt.libvirt.utils.get_arch',
return_value=obj_fields.Architecture.X86_64)
def _test_default_device_names(self, eph, swap, bdm, mock_get_arch):
- bdms = eph + swap + bdm
+ bdms = self.image + eph + swap + bdm
bdi = driver.get_block_device_info(self.instance, bdms)
blockinfo.default_device_names(self.virt_type,
self.context,
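The recurring change in the hunks above is that block_device_info now carries an 'image' list alongside 'swap', 'ephemerals' and 'block_device_mapping'. A minimal illustration of the shape the updated tests construct (values are illustrative only):

    # Illustrative only: the block_device_info shape these tests now build.
    block_device_info = {
        'root_device_name': '/dev/vda',
        'swap': {'device_name': '/dev/vdb', 'swap_size': 1},
        # new in this change: the image-backed root disk is tracked explicitly
        'image': [{'device_type': 'disk', 'boot_index': 0}],
        'ephemerals': [],
        'block_device_mapping': [],
    }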
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 1967939e56..3d0b5ae685 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -16,6 +16,7 @@ from lxml import etree
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
+from nova import exception
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
@@ -70,6 +71,23 @@ class LibvirtConfigTest(LibvirtConfigBaseTest):
obj = config.LibvirtConfigObject(root_name="demo")
obj.parse_str(inxml)
+ def test_parse_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertTrue(obj.parse_on_off_str('on'))
+ self.assertFalse(obj.parse_on_off_str('off'))
+ self.assertFalse(obj.parse_on_off_str(None))
+ self.assertRaises(exception.InvalidInput, obj.parse_on_off_str, 'foo')
+
+ def test_get_yes_no_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('yes', obj.get_yes_no_str(True))
+ self.assertEqual('no', obj.get_yes_no_str(False))
+
+ def test_get_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('on', obj.get_on_off_str(True))
+ self.assertEqual('off', obj.get_on_off_str(False))
+
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
@@ -1519,7 +1537,7 @@ class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
- def test_config_graphics(self):
+ def test_config_graphics_vnc(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
@@ -1531,6 +1549,30 @@ class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
+ def test_config_graphics_spice(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "spice"
+ obj.autoport = False
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ obj.image_compression = "auto_glz"
+ obj.jpeg_compression = "auto"
+ obj.zlib_compression = "always"
+ obj.playback_compression = True
+ obj.streaming_mode = "filter"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="spice" autoport="no" keymap="en_US" listen="127.0.0.1">
+ <image compression="auto_glz"/>
+ <jpeg compression="auto"/>
+ <zlib compression="always"/>
+ <playback compression="on"/>
+ <streaming mode="filter"/>
+ </graphics>
+ """)
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
@@ -1573,7 +1615,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
</hostdev>
"""
- def test_config_guest_hosdev_pci(self):
+ def test_config_guest_hostdev_pci(self):
hostdev = config.LibvirtConfigGuestHostdevPCI()
hostdev.domain = "1234"
hostdev.bus = "11"
@@ -1582,7 +1624,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
xml = hostdev.to_xml()
self.assertXmlEqual(self.expected, xml)
- def test_parse_guest_hosdev_pci(self):
+ def test_parse_guest_hostdev_pci(self):
xmldoc = self.expected
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
@@ -1594,7 +1636,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
self.assertEqual(obj.slot, '0x22')
self.assertEqual(obj.function, '0x3')
- def test_parse_guest_hosdev_usb(self):
+ def test_parse_guest_hostdev_usb(self):
xmldoc = """<hostdev mode='subsystem' type='usb'>
<source startupPolicy='optional'>
<vendor id='0x1234'/>
@@ -2321,6 +2363,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
obj.vapic = True
obj.spinlocks = True
obj.vendorid_spoof = True
+ obj.vpindex = True
+ obj.runtime = True
+ obj.synic = True
+ obj.reset = True
+ obj.frequencies = True
+ obj.reenlightenment = True
+ obj.tlbflush = True
+ obj.ipi = True
+ obj.evmcs = True
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -2329,6 +2380,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
<vapic state="on"/>
<spinlocks state="on" retries="4095"/>
<vendor_id state="on" value="1234567890ab"/>
+ <vpindex state='on'/>
+ <runtime state='on'/>
+ <synic state='on'/>
+ <reset state='on'/>
+ <frequencies state='on'/>
+ <reenlightenment state='on'/>
+ <tlbflush state='on'/>
+ <ipi state='on'/>
+ <evmcs state='on'/>
</hyperv>""")
def test_feature_pmu(self):
@@ -2347,6 +2407,13 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
xml = obj.to_xml()
self.assertXmlEqual(xml, "<pmu state='off'/>")
+ def test_feature_ioapic(self):
+ obj = config.LibvirtConfigGuestFeatureIOAPIC()
+ obj.driver = "libvirt"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<ioapic driver='libvirt'/>")
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
@@ -3138,6 +3205,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
config.LibvirtConfigNodeDeviceMdevInformation)
self.assertEqual("nvidia-11", obj.mdev_information.type)
self.assertEqual(12, obj.mdev_information.iommu_group)
+ self.assertIsNone(obj.mdev_information.uuid)
+
+ def test_config_mdev_device_uuid(self):
+ xmlin = """
+ <device>
+ <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name>
+ <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path>
+ <parent>pci_0000_41_00_0</parent>
+ <driver>
+ <name>vfio_mdev</name>
+ </driver>
+ <capability type='mdev'>
+ <type id='nvidia-442'/>
+ <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid>
+ <iommuGroup number='57'/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+ self.assertIsInstance(obj.mdev_information,
+ config.LibvirtConfigNodeDeviceMdevInformation)
+ self.assertEqual("nvidia-442", obj.mdev_information.type)
+ self.assertEqual(57, obj.mdev_information.iommu_group)
+ self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a",
+ obj.mdev_information.uuid)
def test_config_vdpa_device(self):
xmlin = """
@@ -3975,6 +4068,28 @@ class LibvirtConfigGuestVPMEMTest(LibvirtConfigBaseTest):
</memory>""")
+class LibvirtConfigGuestIOMMUTest(LibvirtConfigBaseTest):
+
+ def test_config_iommu(self):
+ obj = config.LibvirtConfigGuestIOMMU()
+ obj.model = "intel"
+ obj.interrupt_remapping = True
+ obj.caching_mode = True
+ obj.aw_bits = 48
+ obj.eim = True
+ obj.iotlb = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(
+ xml,
+ """
+<iommu model='intel'>
+ <driver intremap='on' caching_mode='on' aw_bits='48' eim='on' iotlb='on'/>
+</iommu>
+ """,
+ )
+
+
class LibvirtConfigDomainCapsVideoModelsTests(LibvirtConfigBaseTest):
def test_parse_video_model(self):
@@ -4091,7 +4206,8 @@ class LibvirtConfigDomainCapsDevicesTests(LibvirtConfigBaseTest):
obj.parse_str(xml)
# we only use the video and disk devices today.
device_types = [config.LibvirtConfigDomainCapsDiskBuses,
- config.LibvirtConfigDomainCapsVideoModels]
+ config.LibvirtConfigDomainCapsVideoModels,
+ ]
# so we assert there are only two device types parsed
self.assertEqual(2, len(obj.devices))
# we then assert that the parsed devices are of the correct type
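A rough sketch of the on/off and yes/no helpers the new LibvirtConfigTest cases assert against. This is inferred from the test expectations only; the real implementations are methods on LibvirtConfigObject in nova/virt/libvirt/config.py and may differ in detail.

    # Sketch only -- behaviour taken from test_parse_on_off_str and friends.
    from nova import exception


    def parse_on_off_str(value):
        """Map 'on'/'off'/None to a boolean; anything else is invalid."""
        if value is not None and value not in ('on', 'off'):
            raise exception.InvalidInput(
                reason="value must be 'on' or 'off', got %r" % value)
        return value == 'on'


    def get_yes_no_str(value):
        return 'yes' if value else 'no'


    def get_on_off_str(value):
        return 'on' if value else 'off'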
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index d701506f06..04c80d662b 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -76,7 +76,6 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
@@ -740,16 +739,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'resolve_driver_format',
imagebackend.Image._get_driver_format)
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
# architecture in some tests
- _p = mock.patch('os.uname')
- self.mock_uname = _p.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.X86_64)
- self.addCleanup(_p.stop)
self.test_instance = _create_test_instance()
network_info = objects.InstanceInfoCache(
@@ -820,6 +817,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Driver capabilities for 'supports_socket_pci_numa_affinity' "
"is invalid",
)
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption'],
+ "Driver capabilities for 'supports_ephemeral_encryption' "
+ "is invalid",
+ )
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption_luks'],
+ "Driver capabilities for 'supports_ephemeral_encryption_luks' "
+ " is invalid",
+ )
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
@@ -963,9 +970,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits')
+ @mock.patch.object(host.Host, "has_min_version")
def test_static_traits(
- self, mock_vif_traits, mock_video_traits, mock_storage_traits,
- mock_cpu_traits,
+ self, mock_version, mock_vif_traits, mock_video_traits,
+ mock_storage_traits, mock_cpu_traits,
):
"""Ensure driver capabilities are correctly retrieved and cached."""
@@ -976,14 +984,21 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_video_traits.return_value = {'COMPUTE_GRAPHICS_MODEL_VGA': True}
mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
+        # needed so the driver reports COMPUTE_VIOMMU_MODEL_VIRTIO support
+ mock_version.return_value = True
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
expected = {
- 'HW_CPU_HYPERTHREADING': True,
- 'COMPUTE_STORAGE_BUS_VIRTIO': True,
'COMPUTE_GRAPHICS_MODEL_VGA': True,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_STORAGE_BUS_VIRTIO': True,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': True,
+ 'HW_CPU_HYPERTHREADING': True
}
static_traits = drvr.static_traits
@@ -1029,6 +1044,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': False
}
static_traits = drvr.static_traits
@@ -1311,7 +1330,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_model(self, mocked_compare):
mocked_compare.side_effect = (2, 0)
self.flags(cpu_mode="custom",
@@ -1324,6 +1344,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_register_all_undefined_instance_details',
new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ def test__check_cpu_compatibility_skip_compare_at_init(
+ self, mocked_compare
+ ):
+ self.flags(group='workarounds', skip_cpu_compare_at_startup=True)
+ self.flags(cpu_mode="custom",
+ cpu_models=["Icelake-Server-noTSX"],
+ cpu_model_extra_flags = ["-mpx"],
+ group="libvirt")
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+ mocked_compare.assert_not_called()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_with_flag(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1332,9 +1368,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["qemu64"],
cpu_model_extra_flags = ["avx", "avx2"],
@@ -1343,11 +1380,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
# here, and in the surrounding similar tests, the non-zero error
# code in the compareCPU() side effect indicates error
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["Broadwell-noTSX"],
cpu_model_extra_flags = ["a v x"],
@@ -1356,11 +1394,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_enabled_and_disabled_flags(
self, mocked_compare
):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(
cpu_mode="custom",
cpu_models=["Cascadelake-Server"],
@@ -1813,6 +1852,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_guest.set_user_password.assert_called_once_with("root", "123")
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ def test_qemu_announce_self(self, mock_get_guest):
+ # Enable the workaround, configure to call announce_self 3 times
+ self.flags(enable_qemu_monitor_announce_self=True, group='workarounds')
+
+ mock_guest = mock.Mock(spec=libvirt_guest.Guest)
+ mock_get_guest.return_value = mock_guest
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._qemu_monitor_announce_self(mock_guest)
+
+        # Ensure that 3 calls are made, matching the default value (3) of
+        # the enable_qemu_monitor_announce_self_retries option
+ mock_guest.announce_self.assert_any_call()
+ self.assertEqual(3, mock_guest.announce_self.call_count)
+
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@@ -2260,6 +2315,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
+ pci_utils.get_mac_by_pci_address.side_effect = None
+ pci_utils.get_mac_by_pci_address.return_value = 'da:d1:f2:91:95:c1'
with test.nested(
mock.patch('nova.objects.VirtualInterfaceList'
'.get_by_instance_uuid', return_value=vifs),
@@ -2269,8 +2326,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=guest),
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
return_value=xml),
- mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- return_value='da:d1:f2:91:95:c1')):
+ ):
metadata_obj = drvr._build_device_metadata(self.context,
instance_ref)
metadata = metadata_obj.devices
@@ -2567,6 +2623,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(time, "time")
def test_get_guest_config(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -2575,178 +2636,249 @@ class LibvirtConnTestCase(test.NoDBTestCase,
test_instance["display_name"] = "purple tomatoes"
test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
test_instance['system_metadata']['owner_user_name'] = 'cupcake'
-
- ctxt = context.RequestContext(project_id=123,
- project_name="aubergine",
- user_id=456,
- user_name="pie")
-
- flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
- vcpus=28,
- root_gb=496,
- ephemeral_gb=8128,
- swap=33550336,
- extra_specs={})
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
instance_ref = objects.Instance(**test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
-
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info,
- context=ctxt)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
self.assertEqual(cfg.uuid, instance_ref["uuid"])
- self.assertEqual(3, len(cfg.features))
- self.assertIsInstance(cfg.features[0],
- vconfig.LibvirtConfigGuestFeatureACPI)
- self.assertIsInstance(cfg.features[1],
- vconfig.LibvirtConfigGuestFeatureAPIC)
- self.assertIsInstance(
- cfg.features[2], vconfig.LibvirtConfigGuestFeatureVMCoreInfo)
self.assertEqual(cfg.memory, 6 * units.Ki)
self.assertEqual(cfg.vcpus, 28)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
self.assertEqual(len(cfg.devices), 11)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigGuestUSBHostController)
- self.assertIsInstance(cfg.devices[10],
- vconfig.LibvirtConfigMemoryBalloon)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
+
self.assertEqual(len(cfg.metadata), 1)
- self.assertIsInstance(cfg.metadata[0],
- vconfig.LibvirtConfigGuestMetaNovaInstance)
- self.assertEqual(version.version_string_with_package(),
- cfg.metadata[0].package)
- self.assertEqual("purple tomatoes",
- cfg.metadata[0].name)
- self.assertEqual(1234567.89,
- cfg.metadata[0].creationTime)
- self.assertEqual("image",
- cfg.metadata[0].roottype)
- self.assertEqual(str(instance_ref["image_ref"]),
- cfg.metadata[0].rootid)
-
- self.assertIsInstance(cfg.metadata[0].owner,
- vconfig.LibvirtConfigGuestMetaNovaOwner)
- self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
- cfg.metadata[0].owner.userid)
- self.assertEqual("cupcake",
- cfg.metadata[0].owner.username)
- self.assertEqual("fake",
- cfg.metadata[0].owner.projectid)
- self.assertEqual("sweetshop",
- cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(
+ version.version_string_with_package(), cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes", cfg.metadata[0].name)
+ self.assertEqual(1234567.89, cfg.metadata[0].creationTime)
+ self.assertEqual("image", cfg.metadata[0].roottype)
+ self.assertEqual(
+ str(instance_ref["image_ref"]), cfg.metadata[0].rootid)
- self.assertIsInstance(cfg.metadata[0].flavor,
- vconfig.LibvirtConfigGuestMetaNovaFlavor)
- self.assertEqual("m1.small",
- cfg.metadata[0].flavor.name)
- self.assertEqual(6,
- cfg.metadata[0].flavor.memory)
- self.assertEqual(28,
- cfg.metadata[0].flavor.vcpus)
- self.assertEqual(496,
- cfg.metadata[0].flavor.disk)
- self.assertEqual(8128,
- cfg.metadata[0].flavor.ephemeral)
- self.assertEqual(33550336,
- cfg.metadata[0].flavor.swap)
+ self.assertIsInstance(
+ cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(
+ "838a72b0-0d54-4827-8fd6-fb1227633ceb",
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("cupcake", cfg.metadata[0].owner.username)
+ self.assertEqual("fake", cfg.metadata[0].owner.projectid)
+ self.assertEqual("sweetshop", cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small", cfg.metadata[0].flavor.name)
+ self.assertEqual(6, cfg.metadata[0].flavor.memory)
+ self.assertEqual(28, cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496, cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336, cfg.metadata[0].flavor.swap)
- @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- def test_get_guest_config_q35(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ num_ports = 0
+ for device in cfg.devices:
+ try:
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
+ num_ports += 1
+ except AttributeError:
+ pass
- TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+        # i440fx is not a PCIe machine type, so there should be no PCIe ports
+ self.assertEqual(0, num_ports)
+
+ @mock.patch.object(time, "time")
+ def test_get_guest_config_no_pcie_ports(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
+ time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- image_meta = objects.ImageMeta.from_dict({
- "disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-q35-test"}})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+ test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
+ test_instance['system_metadata']['owner_user_name'] = 'cupcake'
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.flavor = flavor
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
+        # i440fx is not a PCIe machine type, so there should be no PCIe ports
+ self.assertEqual(0, num_ports)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- def test_get_guest_config_pcie_i440fx(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ def test_get_guest_config_q35(self):
+ """Generate a "q35" guest with minimal configuration.
+
+ This configures an explicit machine type (q35) but defaults to x86
+ since this is our default architecture (in our test env, anyway).
+ """
+ self.flags(virt_type="kvm", group='libvirt')
TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ CONF.set_override(
+ "num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
+ group='libvirt',
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-i440fx-test"}})
+ "properties": {"hw_machine_type": "q35"},
+ })
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta,
+ disk_info,
+ )
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
+ self.assertEqual(len(cfg.devices), 19)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestPCIeRootController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- # i440fx is not pcie machine so there should be no pcie ports
- self.assertEqual(0, num_ports)
+ self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_default_machine_type',
@@ -3132,6 +3264,41 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
+ def test_get_guest_memory_backing_config_locked_flavor(self):
+ extra_specs = {
+ "hw:locked_memory": "True",
+ "hw:mem_page_size": 1000,
+ }
+ flavor = objects.Flavor(
+ name='m1.small', memory_mb=6, vcpus=28, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
+ def test_get_guest_memory_backing_config_locked_image_meta(self):
+ extra_specs = {}
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {
+ "hw_locked_memory": "True",
+ "hw_mem_page_size": 1000,
+ }})
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
def test_get_guest_memory_backing_config_realtime_invalid_share(self):
"""Test behavior when there is no pool of shared CPUS on which to place
the emulator threads, isolating them from the instance CPU processes.
@@ -3425,10 +3592,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(objects=[pci_device])
+ pci_req = objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name='pci-alias-1')
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[pci_req])
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
@@ -3436,9 +3608,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
- return_value=set([3])),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device])):
+ return_value=set([3]))
+ ):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
@@ -3477,23 +3648,31 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(
+ objects=[pci_device, pci_device2]
+ )
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias-1"
+ )
+ ]
+ )
with test.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([3])),
mock.patch.object(random, 'choice'),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device, pci_device2]),
mock.patch.object(conn, '_has_numa_support',
return_value=False)
- ) as (_, _, choice_mock, pci_mock, _):
+ ) as (_, _, choice_mock, _):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
@@ -5660,6 +5839,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'vnc')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
@@ -5690,6 +5874,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, 'vnc')
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
def test_get_guest_config_with_spice_and_tablet(self):
@@ -5726,6 +5915,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'spice')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@@ -5785,8 +5979,57 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[3].type, 'spicevmc')
self.assertEqual(cfg.devices[4].type, "spice")
+ self.assertIsNone(cfg.devices[4].image_compression)
+ self.assertIsNone(cfg.devices[4].jpeg_compression)
+ self.assertIsNone(cfg.devices[4].zlib_compression)
+ self.assertIsNone(cfg.devices[4].playback_compression)
+ self.assertIsNone(cfg.devices[4].streaming_mode)
self.assertEqual(cfg.devices[5].type, video_type)
+ def test_get_guest_config_with_spice_compression(self):
+ self.flags(enabled=False, group='vnc')
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ image_compression='auto_lz',
+ jpeg_compression='never',
+ zlib_compression='always',
+ playback_compression=False,
+ streaming_mode='all',
+ server_listen='10.0.0.1',
+ group='spice')
+ self.flags(pointer_model='usbtablet')
+
+ cfg = self._get_guest_config_with_graphics()
+
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestUSBHostController)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, 'spice')
+ self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
+ self.assertEqual(cfg.devices[3].image_compression, 'auto_lz')
+ self.assertEqual(cfg.devices[3].jpeg_compression, 'never')
+ self.assertEqual(cfg.devices[3].zlib_compression, 'always')
+ self.assertFalse(cfg.devices[3].playback_compression)
+ self.assertEqual(cfg.devices[3].streaming_mode, 'all')
+
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@@ -7478,12 +7721,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
address='0000:00:00.1',
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
+ instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias"
+ )
+ ]
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@@ -8402,6 +8652,206 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
+ def test_get_guest_iommu_not_enabled(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ test_instance = _create_test_instance()
+ instance_ref = objects.Instance(**test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ for device in cfg.devices:
+ self.assertNotEqual('iommu', device.root_name)
+
+ def test_get_guest_iommu_config_model(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'intel',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ def test_get_guest_iommu_config_model_auto(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(48, device.aw_bits)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_intel(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_aarch64(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_viommu_model": 'auto',
+ "hw_architecture": fields.Architecture.AARCH64,
+ "hw_machine_type": "virt"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('smmuv3', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertFalse(device.eim)
+ self.assertTrue(device.iotlb)
+ self.assertEqual(1, count)
+
+ def test_get_guest_iommu_config_not_support_machine_type(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUMachineType, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
+ def test_get_guest_iommu_config_not_support_architecture(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_architecture": fields.Architecture.PPC64LE,
+ "hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUArchitecture, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -8804,6 +9254,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([0, 1, 2, 3]))
+ def test_get_pcpu_available_for_power_mgmt(self, get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set and power management is defined.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='2-3', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ pcpus = drvr._get_pcpu_available()
+ self.assertEqual(set([2, 3]), pcpus)
+
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([4, 5]))
+ def test_get_pcpu_available__cpu_dedicated_set_invalid_for_pm(self,
+ get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set but it's invalid with power management set.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='4-6', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set([0, 1, 2, 3]))
def test_get_vcpu_available(self, get_online_cpus):
@@ -8904,6 +9382,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta))
mock_fsthaw.assert_called_once_with()
+ def test_set_quiesced_agent_connection_fails(self):
+        # This is required to mock the guest host
+ self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
+
+ with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
+ error = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "QEMU guest agent is not connected",
+ error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
+
+ mock_fsfreeze.side_effect = error
+ mock_fsfreeze.error_code = error.get_error_code()
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes", }})
+ self.assertRaises(exception.InstanceQuiesceFailed,
+ drvr._set_quiesced, self.context, instance, image_meta, True)
+
def test_create_snapshot_metadata(self):
base = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
@@ -10786,7 +11284,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -10825,7 +11323,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file,
):
@@ -10865,7 +11363,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -10903,7 +11401,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file,
):
@@ -10935,7 +11433,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU',
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file,
@@ -11043,7 +11541,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file,
):
@@ -11084,7 +11582,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file,
):
@@ -11110,7 +11608,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(return_value.dst_wants_file_backed_memory)
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
@@ -11146,7 +11644,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for vif in result.vifs:
self.assertTrue(vif.supports_os_vif_delegation)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
@@ -11156,7 +11654,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
@@ -11193,7 +11691,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_AARCH64_CPU_COMPARE))
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
def test_compare_cpu_host_aarch64(self,
mock_compare,
mock_get_libversion,
@@ -11216,7 +11714,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_compare.assert_called_once_with(caps.host.cpu.to_xml())
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
@@ -11235,7 +11733,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
@@ -11247,7 +11745,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
jsonutils.dumps(_fake_cpu_info),
instance)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
@@ -13678,6 +14176,85 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main_monitoring_failed(self):
self._test_live_migration_main(mon_side_effect=Exception)
+ @mock.patch.object(host.Host, "get_connection", new=mock.Mock())
+ @mock.patch.object(utils, "spawn", new=mock.Mock())
+ @mock.patch.object(host.Host, "get_guest")
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths")
+ def _test_live_migration_monitor_job_stats_exception(
+ self, exc, mock_copy_disk_paths, mock_get_guest, expect_success=True
+ ):
+ # Verify behavior when various exceptions are raised inside of
+ # Guest.get_job_info() during live migration monitoring.
+ mock_domain = mock.Mock(fakelibvirt.virDomain)
+ guest = libvirt_guest.Guest(mock_domain)
+ mock_get_guest.return_value = guest
+
+ # First, raise the exception from jobStats(), then return "completed"
+ # to make sure we exit the monitoring loop.
+ guest._domain.jobStats.side_effect = [
+ exc,
+ {'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED},
+ ]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ post_method = mock.Mock()
+ migrate_data = mock.Mock()
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_paths.return_value = disks_to_copy
+
+ func = drvr._live_migration
+ args = (self.context, instance, mock.sentinel.dest, post_method,
+ mock.sentinel.recover_method, mock.sentinel.block_migration,
+ migrate_data)
+
+ if expect_success:
+ func(*args)
+ post_method.assert_called_once_with(
+ self.context, instance, mock.sentinel.dest,
+ mock.sentinel.block_migration, migrate_data
+ )
+ else:
+ actual_exc = self.assertRaises(
+ fakelibvirt.libvirtError, func, *args)
+ self.assertEqual(exc, actual_exc)
+
+ def test_live_migration_monitor_job_stats_no_domain(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'no domain',
+ error_code=fakelibvirt.VIR_ERR_NO_DOMAIN
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_op_invalid(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'operation invalid',
+ error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_no_ram_info_set(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'internal error',
+ error_message='migration was active, but no RAM info was set',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_internal_error(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ 'some other internal error',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=False)
+
@mock.patch('os.path.exists', return_value=False)
@mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -13697,7 +14274,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_fetch.assert_called_once_with(self.context, instance,
fallback_from_host=None)
mock_create.assert_called_once_with(
- disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
+ '/fake/instance/dir/foo',
+ disk_info['type'],
+ disk_info['virt_disk_size'],
+ )
mock_exists.assert_called_once_with('/fake/instance/dir/foo')
def test_create_images_and_backing_qcow2(self):
@@ -13729,7 +14309,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context, instance,
"/fake/instance/dir", disk_info)
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_images_not_exist_fallback(
self, mock_utime, mock_create_cow_image):
@@ -13809,7 +14389,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_utime.assert_called()
mock_create_cow_image.assert_called_once_with(
- backfile_path, '/fake/instance/dir/disk_path', virt_disk_size)
+ '/fake/instance/dir/disk_path',
+ 'qcow2',
+ virt_disk_size,
+ backing_file=backfile_path,
+ )
@mock.patch('nova.virt.libvirt.imagebackend.Image.exists',
new=mock.Mock(return_value=True))
@@ -13902,7 +14486,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(mock_fetch_image.called)
@mock.patch('nova.privsep.path.utime')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_images_and_backing_ephemeral_gets_created(
self, mock_create_cow_image, mock_utime):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -13955,14 +14539,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# TODO(efried): Should these be disk_info[path]??
mock_create_cow_image.assert_has_calls([
mock.call(
- root_backing,
CONF.instances_path + '/disk',
- disk_info_byname['disk']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk']['virt_disk_size'],
+ backing_file=root_backing,
),
mock.call(
- ephemeral_backing,
CONF.instances_path + '/disk.local',
- disk_info_byname['disk.local']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk.local']['virt_disk_size'],
+ backing_file=ephemeral_backing,
),
])
@@ -15644,7 +16230,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.privsep.path.utime')
@mock.patch('nova.virt.libvirt.utils.fetch_image')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_ephemeral_specified_fs_not_valid(
self, mock_create_cow_image, mock_fetch_image, mock_utime):
CONF.set_override('default_ephemeral_format', 'ext4')
@@ -15660,10 +16246,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- image_meta)
- disk_info['mapping'].pop('disk.local')
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta,
+ block_device_info=block_device_info)
with test.nested(
mock.patch('oslo_concurrency.processutils.execute'),
@@ -16075,9 +16660,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warning')
- @mock.patch('nova.compute.utils.get_machine_ips')
- def test_check_my_ip(self, mock_ips, mock_log):
- mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
+ def test_check_my_ip(self, mock_log):
+
+ self.libvirt.mock_get_machine_ips.return_value = [
+ '8.8.8.8', '75.75.75.75']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._check_my_ip()
mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
@@ -16099,6 +16685,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -16106,8 +16693,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
@@ -16122,6 +16707,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -16129,8 +16715,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16150,11 +16734,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = True
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
return_value=mock.MagicMock()),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16313,7 +16896,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.return_value = fake_guest
self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
- self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])
drvr.reboot(None, instance, [], 'SOFT')
@@ -16325,14 +16907,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
self.assertEqual(2, mock_get.call_count)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
- mock_sleep, mock_loopingcall,
- mock_get_instance_pci_devs):
+ mock_sleep, mock_loopingcall):
class FakeLoopingCall(object):
def start(self, *a, **k):
return self
@@ -16360,7 +16940,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_domain.return_value = mock_domain
mock_hard_reboot.side_effect = fake_hard_reboot
mock_loopingcall.return_value = FakeLoopingCall()
- mock_get_instance_pci_devs.return_value = []
drvr.reboot(None, instance, [], 'SOFT')
self.assertTrue(self.reboot_hard_reboot_called)
@@ -16558,7 +17137,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
- @mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
@@ -16575,7 +17153,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_guest_config, mock_get_instance_path,
mock_get_instance_disk_info, mock_create_images_and_backing,
mock_create_domand_and_network,
- mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
+ mock_looping_call, mock_ensure_tree):
"""For a hard reboot, we shouldn't need an additional call to glance
to get the image metadata.
@@ -16621,10 +17199,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(conn, '_detach_mediated_devices')
@mock.patch.object(conn, '_detach_direct_passthrough_ports')
@mock.patch.object(conn, '_detach_pci_devices')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='pci devs')
@mock.patch.object(conn._host, 'get_guest', return_value=guest)
- def suspend(mock_get_guest, mock_get_instance_pci_devs,
+ def suspend(mock_get_guest,
mock_detach_pci_devices,
mock_detach_direct_passthrough_ports,
mock_detach_mediated_devices,
@@ -16767,15 +17343,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
- @mock.patch.object(host.Host,
- 'has_min_version', return_value=True)
- def _test_detach_direct_passthrough_ports(self,
- mock_has_min_version, vif_type):
+ @mock.patch.object(
+ host.Host, 'has_min_version', new=mock.Mock(return_value=True)
+ )
+ def _test_detach_direct_passthrough_ports(
+ self, vif_type, detach_device=True,
+ vnic_type=network_model.VNIC_TYPE_DIRECT):
instance = objects.Instance(**self.test_instance)
expeted_pci_slot = "0000:00:00.0"
network_info = _fake_network_info(self)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ network_info[0]['vnic_type'] = vnic_type
# some more adjustments for the fake network_info so that
# the correct get_config function will be executed (vif's
# get_config_hw_veb - which is according to the real SRIOV vif)
@@ -16788,32 +17366,55 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_pci_devices will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
expected_pci_device_obj = (
- objects.PciDevice(address=expeted_pci_slot, request_id=None))
+ objects.PciDevice(
+ address=expeted_pci_slot, request_id=None, compute_node_id=42
+ )
+ )
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [expected_pci_device_obj]
- domain = FakeVirtDomain()
+ domain = FakeVirtDomain(id=24601, name='Jean Valjean')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
- with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
+ with mock.patch.object(
+ drvr, '_detach_pci_devices'
+ ) as mock_detach_pci, mock.patch.object(
+ drvr, 'detach_interface'
+ ) as mock_detach_interface:
drvr._detach_direct_passthrough_ports(
self.context, instance, guest)
- mock_detach_pci.assert_called_once_with(
- guest, [expected_pci_device_obj])
+ if detach_device:
+ mock_detach_pci.assert_called_once_with(
+ guest, [expected_pci_device_obj])
+ else:
+ mock_detach_interface.assert_called_once()
+
+ def test_detach_direct_passthrough_ports_ovs_hw_offload(self):
+ # Note: test detach_direct_passthrough_ports method for vif with config
+ # LibvirtConfigGuestInterface
+ self._test_detach_direct_passthrough_ports("ovs", detach_device=False)
- def test_detach_direct_passthrough_ports_interface_interface_hostdev(self):
+ def test_detach_direct_passthrough_ports_sriov_nic_agent(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestInterface
- self._test_detach_direct_passthrough_ports(vif_type="hw_veb")
+ self._test_detach_direct_passthrough_ports(
+ "hw_veb", detach_device=False
+ )
+
+ def test_detach_direct_physical_passthrough_ports_sriov_nic_agent(self):
+ self._test_detach_direct_passthrough_ports(
+ "hostdev_physical",
+ vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL
+ )
- def test_detach_direct_passthrough_ports_interface_pci_hostdev(self):
+ def test_detach_direct_passthrough_ports_infiniband(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestHostdevPCI
- self._test_detach_direct_passthrough_ports(vif_type="ib_hostdev")
+ self._test_detach_direct_passthrough_ports("ib_hostdev")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@@ -16823,9 +17424,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
network_info = _fake_network_info(self, 2)
+ direct_physical = network_model.VNIC_TYPE_DIRECT_PHYSICAL
for network_info_inst in network_info:
- network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- network_info_inst['type'] = "hw_veb"
+ network_info_inst['vnic_type'] = direct_physical
+ network_info_inst['type'] = "hostdev_physical"
network_info_inst['details'] = dict(vlan="2145")
network_info_inst['address'] = "fa:16:3e:96:2a:48"
@@ -16835,7 +17437,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_pci_devices will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [
@@ -16890,8 +17492,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr, '_create_guest_with_network',
return_value=guest),
mock.patch.object(drvr, '_attach_pci_devices'),
- mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='fake_pci_devs'),
+ mock.patch('nova.objects.Instance.get_pci_devices',
+ return_value='fake_pci_devs'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(guest, 'sync_guest_time'),
mock.patch.object(drvr, '_wait_for_running',
@@ -17642,12 +18244,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
got = drvr._get_cpu_info()
self.assertEqual(want, got)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
@mock.patch.object(host.Host, 'list_pci_devices',
return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7'])
- def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname):
+ def test_get_pci_passthrough_devices(self, mock_list):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -17720,7 +18321,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# The first call for every VF is to determine parent_ifname and
# the second call to determine the MAC address.
- mock_get_ifname.assert_has_calls([
+ pci_utils.get_ifname_by_pci_address.assert_has_calls([
mock.call('0000:04:10.7', pf_interface=True),
mock.call('0000:04:11.7', pf_interface=True),
])
@@ -20133,7 +20734,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr._conn, 'defineXML', create=True),
mock.patch('nova.virt.libvirt.utils.get_disk_size'),
mock.patch('nova.virt.libvirt.utils.get_disk_backing_file'),
- mock.patch('nova.virt.libvirt.utils.create_cow_image'),
+ mock.patch('nova.virt.libvirt.utils.create_image'),
mock.patch('nova.virt.libvirt.utils.extract_snapshot'),
mock.patch.object(drvr, '_set_quiesced'),
mock.patch.object(drvr, '_can_quiesce')
@@ -20176,7 +20777,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_size.assert_called_once_with(srcfile, format="qcow2")
mock_backing.assert_called_once_with(srcfile, basename=False,
format="qcow2")
- mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_create_cow.assert_called_once_with(
+ dltfile, 'qcow2', 1004009, backing_file=bckfile)
mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
@@ -20410,7 +21012,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 0,
'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/sda'}))
+ 'device_name': '/dev/sda', 'boot_index': 0}))
info = {'block_device_mapping': driver_block_device.convert_volumes(
[bdm])}
info['block_device_mapping'][0]['connection_info'] = conn_info
@@ -20520,8 +21122,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_disk_config_image_type())
self.assertEqual(2, drvr.image_backend.by_name.call_count)
- call1 = mock.call(instance, 'disk.config', 'rbd')
- call2 = mock.call(instance, 'disk.config', 'flat')
+ call1 = mock.call(instance, 'disk.config', 'rbd',
+ disk_info_mapping=disk_mapping['disk.config'])
+ call2 = mock.call(instance, 'disk.config', 'flat',
+ disk_info_mapping=disk_mapping['disk.config'])
drvr.image_backend.by_name.assert_has_calls([call1, call2])
self.assertEqual(mock.sentinel.diskconfig, diskconfig)
@@ -20564,7 +21168,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = mock.Mock()
with test.nested(
- mock.patch.object(pci_manager, 'get_instance_pci_devs'),
mock.patch.object(drvr, '_attach_pci_devices'),
mock.patch.object(drvr, '_attach_direct_passthrough_ports'),
):
@@ -21000,7 +21603,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'reserved': 0,
},
orc.PCPU: {
@@ -21016,7 +21619,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'reserved': 512,
},
orc.DISK_GB: {
@@ -23138,6 +23741,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
instance = self._create_instance(params=inst_params)
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': instance.image_ref}
instance_dir = libvirt_utils.get_instance_path(instance)
disk_path = os.path.join(instance_dir, 'disk')
@@ -23157,7 +23763,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
]
drvr._create_and_inject_local_root(
- self.context, instance, False, '', disk_images, None, None)
+ self.context, instance, disk_info['mapping'], False, '',
+ disk_images, None, None)
mock_fetch_calls = [
mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
@@ -23240,9 +23847,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# config_drive is True by default, configdrive.required_by()
# returns True
instance_ref = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
disk_images = {'image_id': None}
- drvr._create_and_inject_local_root(self.context, instance_ref, False,
+ drvr._create_and_inject_local_root(self.context, instance_ref,
+ disk_info['mapping'], False,
'', disk_images, get_injection_info(),
None)
self.assertFalse(mock_inject.called)
@@ -23262,6 +23873,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_image.get.return_value = {'locations': [], 'disk_format': 'raw'}
instance = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': 'foo'}
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -23272,6 +23886,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_fetch.reset_mock()
drvr._create_and_inject_local_root(self.context,
instance,
+ disk_info['mapping'],
False,
'',
disk_images,
@@ -24631,7 +25246,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue and
# disk, in that order
@@ -24703,7 +25318,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue, disk, and
# disk.config.rescue in that order
@@ -24941,7 +25556,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
- 'device_name': '/dev/vda'}))
+ 'device_name': '/dev/vda',
+ 'boot_index': 0}))
bdms = driver_block_device.convert_volumes([bdm])
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
@@ -27178,6 +27794,35 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_guest.return_value.assert_not_called()
self.assertIsNone(mock_find.call_args.args[3])
+ def test_set_features_windows(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ guest = vconfig.LibvirtConfigGuest()
+ self.drvr._set_features(
+ guest, 'windows',
+ objects.ImageMeta(
+ properties=objects.ImageMetaProps()
+ ),
+ objects.Flavor(extra_specs={})
+ )
+ features = guest.features
+ hv = None
+ for feature in features:
+ if feature.root_name == 'hyperv':
+ hv = feature
+ self.assertTrue(hv.relaxed)
+ self.assertTrue(hv.vapic)
+ self.assertTrue(hv.spinlocks)
+ self.assertEqual(8191, hv.spinlock_retries)
+ self.assertTrue(hv.vpindex)
+ self.assertTrue(hv.runtime)
+ self.assertTrue(hv.synic)
+ self.assertTrue(hv.reset)
+ self.assertTrue(hv.frequencies)
+ self.assertTrue(hv.reenlightenment)
+ self.assertTrue(hv.tlbflush)
+ self.assertTrue(hv.ipi)
+ self.assertTrue(hv.evmcs)
+
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
@@ -28248,7 +28893,7 @@ class _BaseSnapshotTests(test.NoDBTestCase):
@mock.patch.object(host.Host, '_get_domain')
@mock.patch('nova.virt.libvirt.utils.get_disk_size',
new=mock.Mock(return_value=0))
- @mock.patch('nova.virt.libvirt.utils.create_cow_image',
+ @mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
new=mock.Mock(return_value=None))
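The new live-migration monitoring tests above (_test_live_migration_monitor_job_stats_exception and its callers) encode which libvirt failures from the job-stats query are tolerated and which abort the migration. A minimal sketch of that tolerance, written against the real libvirt-python bindings; the function name and structure are illustrative only and are not Nova's actual driver code:

    import libvirt

    # Assumed-benign errors while a migration is winding down: the guest may
    # already be gone from the source, or the job may have just finished.
    IGNORE_AND_KEEP_POLLING = (
        libvirt.VIR_ERR_NO_DOMAIN,
        libvirt.VIR_ERR_OPERATION_INVALID,
    )

    def poll_job(domain):
        """Return this round's job stats, or None when the error is benign
        and the caller should simply poll again."""
        try:
            return domain.jobStats()
        except libvirt.libvirtError as exc:
            if exc.get_error_code() in IGNORE_AND_KEEP_POLLING:
                return None
            # Anything else (e.g. an unexpected VIR_ERR_INTERNAL_ERROR)
            # propagates and fails the migration, as the last test expects.
            raise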
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index f662c108a9..5b181b8f06 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -404,9 +404,21 @@ class GuestTestCase(test.NoDBTestCase):
self.assertIsNotNone(
self.guest.get_interface_by_cfg(
cfg, from_persistent_config=True))
+ cfg = vconfig.LibvirtConfigGuestInterface()
+ # NOTE(sean-k-mooney): a default-constructed object is not valid
+ # to pass to get_interface_by_cfg, so we just modify the xml to
+ # make it not match
+ cfg.parse_str("""
+ <interface type="wont_match">
+ <mac address="fa:16:3e:f9:af:ae"/>
+ <model type="virtio"/>
+ <driver name="qemu"/>
+ <source bridge="qbr84008d03-11"/>
+ <target dev="tap84008d03-11"/>
+ </interface>""")
self.assertIsNone(
self.guest.get_interface_by_cfg(
- vconfig.LibvirtConfigGuestInterface(),
+ cfg,
from_persistent_config=True))
self.domain.XMLDesc.assert_has_calls(
[
@@ -1041,3 +1053,25 @@ class JobInfoTestCase(test.NoDBTestCase):
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
+
+ @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
+ @mock.patch.object(fakelibvirt.virDomain, "jobStats")
+ def test_job_stats_no_ram(self, mock_stats, mock_info):
+ mock_stats.side_effect = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error: migration was active, but no RAM info was set",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ error_message="migration was active, but no RAM info was set")
+
+ info = self.guest.get_job_info()
+
+ self.assertIsInstance(info, libvirt_guest.JobInfo)
+ self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_NONE, info.type)
+ self.assertEqual(0, info.time_elapsed)
+ self.assertEqual(0, info.time_remaining)
+ self.assertEqual(0, info.memory_total)
+ self.assertEqual(0, info.memory_processed)
+ self.assertEqual(0, info.memory_remaining)
+
+ mock_stats.assert_called_once_with()
+ self.assertFalse(mock_info.called)
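test_job_stats_no_ram above pins down how Guest.get_job_info() is expected to behave when QEMU hits the known "migration was active, but no RAM info was set" race: return an idle JobInfo with zeroed counters rather than raising or falling back to the legacy jobInfo() call. A self-contained sketch of that behaviour using a stand-in JobInfo class; the real one lives in nova.virt.libvirt.guest:

    import libvirt

    class JobInfo:  # stand-in for nova.virt.libvirt.guest.JobInfo
        _counters = ('time_elapsed', 'time_remaining', 'memory_total',
                     'memory_processed', 'memory_remaining')

        def __init__(self, type=libvirt.VIR_DOMAIN_JOB_NONE, **stats):
            self.type = type
            for name in self._counters:
                setattr(self, name, stats.get(name, 0))

    def get_job_info(domain):
        try:
            return JobInfo(**domain.jobStats())
        except libvirt.libvirtError as exc:
            if (exc.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR and
                    'migration was active, but no RAM info was set'
                    in exc.get_error_message()):
                # Report a type-NONE job with zeroed counters; jobInfo() is
                # deliberately not queried, as the test asserts.
                return JobInfo()
            raise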
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 07366ef028..631b10d81a 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -72,11 +72,10 @@ class HostTestCase(test.NoDBTestCase):
self.useFixture(nova_fixtures.LibvirtFixture())
self.host = host.Host("qemu:///system")
- @mock.patch("nova.virt.libvirt.host.Host._init_events")
- def test_repeat_initialization(self, mock_init_events):
+ def test_repeat_initialization(self):
for i in range(3):
self.host.initialize()
- mock_init_events.assert_called_once_with()
+ self.host._init_events.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback(self, mock_close):
@@ -1053,6 +1052,12 @@ Active: 8381604 kB
'iowait': 6121490000000},
stats)
+ @mock.patch.object(fakelibvirt.virConnect, "getCPUMap")
+ def test_get_available_cpus(self, mock_map):
+ mock_map.return_value = (4, [True, True, False, False], None)
+ result = self.host.get_available_cpus()
+ self.assertEqual(result, {0, 1, 2, 3})
+
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
fake_dom_xml = """
@@ -1156,12 +1161,9 @@ Active: 8381604 kB
expect_vf = ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan", "txvlan"]
self.assertEqual(expect_vf, actualvf)
- @mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- new=mock.MagicMock(
- side_effect=exception.PciDeviceNotFoundById(
- '0000:04:00.3')))
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- def test_get_pcidev_info_non_nic(self, mock_get_ifname):
+ def test_get_pcidev_info_non_nic(self):
+ pci_utils.get_mac_by_pci_address.side_effect = (
+ exception.PciDeviceNotFoundById('0000:04:00.3'))
dev_name = "pci_0000_04_11_7"
pci_dev = fakelibvirt.NodeDevice(
self.host._get_connection(),
@@ -1175,11 +1177,10 @@ Active: 8381604 kB
'parent_addr': '0000:04:00.3',
}
self.assertEqual(expect_vf, actual_vf)
- mock_get_ifname.assert_not_called()
+ pci_utils.get_ifname_by_pci_address.assert_not_called()
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
- def test_get_pcidev_info(self, mock_get_ifname):
+ def test_get_pcidev_info(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
devs = {
"pci_0000_04_00_3", "pci_0000_04_10_7", "pci_0000_04_11_7",
"pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1",
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index fdac091985..0dc1009c92 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -163,7 +163,13 @@ class _ImageTestCase(object):
self.assertEqual(fs.source_file, image.path)
def test_libvirt_info(self):
- image = self.image_class(self.INSTANCE, self.NAME)
+ disk_info = {
+ 'bus': 'virtio',
+ 'dev': '/dev/vda',
+ 'type': 'cdrom',
+ }
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
extra_specs = {
'quota:disk_read_bytes_sec': 10 * units.Mi,
'quota:disk_read_iops_sec': 1 * units.Ki,
@@ -172,15 +178,9 @@ class _ImageTestCase(object):
'quota:disk_total_bytes_sec': 30 * units.Mi,
'quota:disk_total_iops_sec': 3 * units.Ki,
}
- disk_info = {
- 'bus': 'virtio',
- 'dev': '/dev/vda',
- 'type': 'cdrom',
- }
disk = image.libvirt_info(
- disk_info, cache_mode="none", extra_specs=extra_specs,
- boot_order="1")
+ cache_mode="none", extra_specs=extra_specs, boot_order="1")
self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
self.assertEqual("/dev/vda", disk.target_dev)
@@ -205,16 +205,18 @@ class _ImageTestCase(object):
get_disk_size.assert_called_once_with(image.path)
def _test_libvirt_info_scsi_with_unit(self, disk_unit):
- # The address should be set if bus is scsi and unit is set.
- # Otherwise, it should not be set at all.
- image = self.image_class(self.INSTANCE, self.NAME)
disk_info = {
'bus': 'scsi',
'dev': '/dev/sda',
'type': 'disk',
}
+ # The address should be set if bus is scsi and unit is set.
+ # Otherwise, it should not be set at all.
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
+
disk = image.libvirt_info(
- disk_info, cache_mode='none', extra_specs={}, disk_unit=disk_unit)
+ cache_mode='none', extra_specs={}, disk_unit=disk_unit)
if disk_unit:
self.assertEqual(0, disk.device_addr.controller)
self.assertEqual(disk_unit, disk.device_addr.unit)
@@ -523,7 +525,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
@@ -544,14 +546,14 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(
- self.TEMPLATE_PATH, self.PATH, self.SIZE)
+ self.PATH, 'qcow2', self.SIZE, backing_file=self.TEMPLATE_PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
@@ -576,7 +578,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@@ -615,7 +617,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
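The imagebackend changes above move the disk mapping from the libvirt_info() arguments into the Image constructor (disk_info_mapping), so the backend object carries its target bus/dev/type instead of being handed them at XML-generation time. A toy illustration of that pattern only; it is not the nova.virt.libvirt.imagebackend.Image class:

    class ToyImage:
        def __init__(self, path, disk_info_mapping=None):
            self.path = path
            self.disk_info_mapping = disk_info_mapping or {}

        def libvirt_info(self, cache_mode, extra_specs=None, boot_order=None):
            # bus/dev/type now come from the mapping stored at construction
            return {
                'source_file': self.path,
                'target_bus': self.disk_info_mapping.get('bus'),
                'target_dev': self.disk_info_mapping.get('dev'),
                'device_type': self.disk_info_mapping.get('type'),
                'driver_cache': cache_mode,
                'boot_order': boot_order,
            }

    disk = ToyImage(
        '/var/lib/nova/instances/uuid/disk',
        disk_info_mapping={'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
    ).libvirt_info(cache_mode='none')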
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 8c24f3fb92..c648108f56 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -103,33 +103,98 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_create_image(self, mock_execute):
- libvirt_utils.create_image('raw', '/some/path', '10G')
- libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
- expected_args = [(('qemu-img', 'create', '-f', 'raw',
- '/some/path', '10G'),),
- (('qemu-img', 'create', '-f', 'qcow2',
- '/some/stuff', '1234567891234'),)]
- self.assertEqual(expected_args, mock_execute.call_args_list)
-
- @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
- def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
- mock_execute.return_value = ('stdout', None)
+ def _test_create_image(
+ self, path, disk_format, disk_size, mock_info, mock_execute,
+ mock_ntf, backing_file=None, encryption=None
+ ):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
- cluster_size=mock.sentinel.cluster_size)
- libvirt_utils.create_cow_image(mock.sentinel.backing_path,
- mock.sentinel.new_path)
- mock_info.assert_called_once_with(mock.sentinel.backing_path)
- mock_execute.assert_has_calls([mock.call(
- 'qemu-img', 'create', '-f', 'qcow2', '-o',
- 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
- mock.sentinel.backing_path, mock.sentinel.backing_fmt,
- mock.sentinel.cluster_size),
- mock.sentinel.new_path)])
+ cluster_size=mock.sentinel.cluster_size,
+ )
+ fh = mock_ntf.return_value.__enter__.return_value
+
+ libvirt_utils.create_image(
+ path, disk_format, disk_size, backing_file=backing_file,
+ encryption=encryption,
+ )
+
+ cow_opts = []
+
+ if backing_file is None:
+ mock_info.assert_not_called()
+ else:
+ mock_info.assert_called_once_with(backing_file)
+ cow_opts = [
+ '-o',
+ f'backing_file={mock.sentinel.backing_file},'
+ f'backing_fmt={mock.sentinel.backing_fmt},'
+ f'cluster_size={mock.sentinel.cluster_size}',
+ ]
+
+ encryption_opts = []
+
+ if encryption:
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={fh.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ expected_args = (
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
+ disk_format, *cow_opts, *encryption_opts, path,
+ )
+ if disk_size is not None:
+ expected_args += (disk_size,)
+
+ self.assertEqual([(expected_args,)], mock_execute.call_args_list)
+
+ def test_create_image_raw(self):
+ self._test_create_image('/some/path', 'raw', '10G')
+
+ def test_create_image_qcow2(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ )
+
+ def test_create_image_backing_file(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_size_none(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', None,
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_encryption(self):
+ encryption = {
+ 'secret': 'a_secret',
+ 'format': 'luks',
+ }
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ encryption=encryption,
+ )
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
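The reworked create_image() tests above spell out the qemu-img command line expected for plain, COW and encrypted images. A small, self-contained sketch of that command construction, mirroring the argument order the tests assert (secret-file plumbing and the extra encrypt.* cipher options are elided); the real helper is nova.virt.libvirt.utils.create_image and may differ in detail:

    def qemu_img_create_cmd(path, disk_format, disk_size=None,
                            backing_file=None, backing_fmt='qcow2',
                            cluster_size=65536, encrypt_format=None):
        cmd = ['env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create',
               '-f', disk_format]
        if backing_file is not None:
            # backing_fmt/cluster_size are probed from the backing file via
            # qemu_img_info() in the tests; fixed values are assumed here.
            cmd += ['-o', 'backing_file=%s,backing_fmt=%s,cluster_size=%s'
                    % (backing_file, backing_fmt, cluster_size)]
        if encrypt_format is not None:
            cmd += ['-o', 'encrypt.key-secret=sec',
                    '-o', 'encrypt.format=%s' % encrypt_format]
        cmd.append(path)
        if disk_size is not None:
            cmd.append(disk_size)
        return cmd

    # e.g. a qcow2 overlay on top of a cached base image
    print(qemu_img_create_cmd('/tmp/overlay.qcow2', 'qcow2', '10G',
                              backing_file='/tmp/base.qcow2'))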
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index d89aa279d8..6d87ed727c 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -518,18 +518,17 @@ class LibvirtVifTestCase(test.NoDBTestCase):
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
- self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False))
+ self.libvirt = self.useFixture(
+ nova_fixtures.LibvirtFixture(stub_os_vif=False))
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
# multiqueue configuration is host OS specific
- _a = mock.patch('os.uname')
- self.mock_uname = _a.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.10.13-200-generic', '', 'x86_64')
- self.addCleanup(_a.stop)
def _get_node(self, xml):
doc = etree.fromstring(xml)
@@ -984,14 +983,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.vif_bridge,
self.vif_bridge['network']['bridge'])
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- @mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
- @mock.patch('nova.privsep.linux_net.set_device_macaddr')
- @mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan')
- def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan,
- mock_set_macaddr, mock_get_vf_num,
- mock_get_ifname):
- mock_get_ifname.side_effect = ['eth1', 'eth13']
+ def _test_hw_veb_op(self, op, vlan):
+ self.libvirt.mock_get_vf_num_by_pci_address.return_value = 1
+ pci_utils.get_ifname_by_pci_address.side_effect = ['eth1', 'eth13']
vlan_id = int(vlan)
port_state = 'up' if vlan_id > 0 else 'down'
mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug'
@@ -1006,10 +1000,13 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'set_macaddr': [mock.call('eth13', mac, port_state=port_state)]
}
op(self.instance, self.vif_hw_veb_macvtap)
- mock_get_ifname.assert_has_calls(calls['get_ifname'])
- mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
- mock_set_macaddr.assert_has_calls(calls['set_macaddr'])
- mock_set_macaddr_and_vlan.assert_called_once_with(
+ pci_utils.get_ifname_by_pci_address.assert_has_calls(
+ calls['get_ifname'])
+ self.libvirt.mock_get_vf_num_by_pci_address.assert_has_calls(
+ calls['get_vf_num'])
+ self.libvirt.mock_set_device_macaddr.assert_has_calls(
+ calls['set_macaddr'])
+ self.libvirt.mock_set_device_macaddr_and_vlan.assert_called_once_with(
'eth1', 1, mock.ANY, vlan_id)
def test_plug_hw_veb(self):
@@ -1219,9 +1216,8 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='eth1')
- def test_hw_veb_driver_macvtap(self, mock_get_ifname):
+ def test_hw_veb_driver_macvtap(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'eth1'
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index 79dbc44bf1..703f15967c 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -49,7 +49,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
- 'volblank': driver_block_device.DriverVolBlankBlockDevice
+ 'volblank': driver_block_device.DriverVolBlankBlockDevice,
+ 'image': driver_block_device.DriverImageBlockDevice,
}
swap_bdm_dict = block_device.BlockDeviceDict(
@@ -78,14 +79,22 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
- 'boot_index': -1})
+ 'boot_index': -1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
- 'disk_bus': 'scsi'}
+ 'disk_bus': 'scsi',
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
@@ -210,6 +219,35 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'boot_index': -1,
'volume_type': None}
+ image_bdm_dict = block_device.BlockDeviceDict(
+ {'id': 7, 'instance_uuid': uuids.instance,
+ 'device_name': '/dev/vda',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None})
+
+ image_driver_bdm = {
+ 'device_name': '/dev/vda',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'disk_bus': 'virtio',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None}
+
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = mock.MagicMock(autospec=cinder.API)
@@ -219,6 +257,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
+ self.image_bdm = fake_block_device.fake_bdm_object(
+ self.context, self.image_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
@@ -337,6 +377,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
if field == 'attachment_id':
# Must set UUID values on UUID fields.
fake_value = ATTACHMENT_ID
+ elif isinstance(test_bdm._bdm_obj.fields[fld],
+ fields.UUIDField):
+ # Generically handle other UUID fields.
+ fake_value = uuids.fake_value
else:
fake_value = 'fake_changed_value'
test_bdm[field] = fake_value
@@ -377,6 +421,20 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
+ def test_driver_image_block_device(self):
+ self._test_driver_device("image")
+
+ def test_driver_image_default_size(self):
+ self._test_driver_default_size('image')
+
+ def test_driver_image_block_device_destination_not_local(self):
+ self._test_driver_device('image')
+ bdm = self.image_bdm_dict.copy()
+ bdm['destination_type'] = 'volume'
+ self.assertRaises(driver_block_device._InvalidType,
+ self.driver_classes['image'],
+ fake_block_device.fake_bdm_object(self.context, bdm))
+
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
@@ -406,7 +464,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 3)
self.assertEqual('fake-snapshot-id-1', test_bdm.get('snapshot_id'))
- def test_driver_image_block_device(self):
+ def test_driver_volume_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
@@ -416,7 +474,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 1)
self.assertEqual('fake-image-id-1', test_bdm.get('image_id'))
- def test_driver_image_block_device_destination_local(self):
+ def test_driver_volume_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
@@ -1263,12 +1321,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
- self.ephemeral_bdm, self.volsnapshot_bdm):
+ self.ephemeral_bdm, self.volsnapshot_bdm, self.image_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
- local_image = self.volimage_bdm_dict.copy()
- local_image['destination_type'] = 'local'
- self.assertFalse(driver_block_device.is_implemented(
- fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 3954e8d805..016c478f8c 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -2638,45 +2638,45 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
def test_get_fitting_success_no_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3)
+ self.host, self.instance3, {})
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
def test_get_fitting_success_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance.cells[0].id)
def test_get_fitting_fails_no_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance)
def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance2)
def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
self.assertEqual(2, fitted_instance2.cells[0].id)
@@ -2691,7 +2691,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
mock_supports.assert_called_once_with(
@@ -2708,7 +2708,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsNone(fitted_instance)
mock_supports.assert_has_calls([
@@ -2725,6 +2725,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
'support_requests', return_value= True):
fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsInstance(fitted_instance1,
@@ -2740,6 +2741,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsNone(fitted_instance1)
@@ -2758,7 +2760,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# ...therefore an instance without a PCI device should get host cell 2
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
# TODO(sfinucan): We should be comparing this against the HOST cell
self.assertEqual(2, instance_topology.cells[0].id)
@@ -2768,7 +2770,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# ...therefore an instance without a PCI device should get host cell 1
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
self.assertEqual(1, instance_topology.cells[0].id)
@@ -2814,6 +2816,54 @@ class NumberOfSerialPortsTest(test.NoDBTestCase):
flavor, image_meta)
+class VirtLockMemoryTestCase(test.NoDBTestCase):
+ def _test_get_locked_memory_constraint(self, spec=None, props=None):
+ flavor = objects.Flavor(vcpus=16, memory_mb=2048,
+ extra_specs=spec or {})
+ image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
+ return hw.get_locked_memory_constraint(flavor, image_meta)
+
+ def test_get_locked_memory_constraint_image(self):
+ self.assertTrue(
+ self._test_get_locked_memory_constraint(
+ spec={"hw:mem_page_size": "small"},
+ props={"hw_locked_memory": "True"}))
+
+ def test_get_locked_memory_conflict(self):
+ ex = self.assertRaises(
+ exception.FlavorImageLockedMemoryConflict,
+ self._test_get_locked_memory_constraint,
+ spec={
+ "hw:locked_memory": "False",
+ "hw:mem_page_size": "small"
+ },
+ props={"hw_locked_memory": "True"}
+ )
+ ex_msg = ("locked_memory value in image (True) and flavor (False) "
+ "conflict. A consistent value is expected if both "
+ "specified.")
+ self.assertEqual(ex_msg, str(ex))
+
+ def test_get_locked_memory_constraint_forbidden(self):
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {"hw:locked_memory": "True"})
+
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {},
+ {"hw_locked_memory": "True"})
+
+ def test_get_locked_memory_constraint_image_false(self):
+ # False value of locked_memory will not raise LockMemoryForbidden
+ self.assertFalse(
+ self._test_get_locked_memory_constraint(
+ spec=None,
+ props={"hw_locked_memory": "False"}))
+
+
class VirtMemoryPagesTestCase(test.NoDBTestCase):
def test_cell_instance_pagesize(self):
cell = objects.InstanceNUMACell(
@@ -3836,11 +3886,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3867,11 +3924,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(1,))
@@ -3898,11 +3962,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fit(self):
@@ -3927,13 +3998,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3970,13 +4053,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
@@ -4003,13 +4098,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
@@ -4043,7 +4150,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([4, 5]), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_cpu_pinning_usage_from_instances(self):
@@ -4683,7 +4790,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4697,7 +4804,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4711,7 +4818,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4725,7 +4832,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1, 2, 4]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_multi_nodes_isolate(self):
@@ -4742,7 +4849,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2}, inst_topo.cells[1].cpu_pinning)
@@ -4762,7 +4869,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
# The guest NUMA node 0 is requesting 2pCPUs + 1 additional
# pCPU for emulator threads, the host can't handle the
# request.
@@ -4782,7 +4889,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1, 2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2, 2: 3}, inst_topo.cells[1].cpu_pinning)
@@ -4857,7 +4964,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0, 1: 2}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([4]), inst_topo.cells[0].cpuset_reserved)
@@ -4887,7 +4994,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4916,7 +5023,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
if policy:
inst_topo.emulator_threads_policy = policy
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
return inst_topo
def test_mixed_instance_not_define(self):
@@ -4973,7 +5080,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 3}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5002,7 +5109,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5791,7 +5898,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
def test_sort_host_numa_cell_num_equal_instance_cell_num(self):
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance0)
+ self.host, self.instance0, {})
self.assertInstanceNUMAcellOrder([0, 1, 2, 3], instance_topology)
def test_sort_no_pci_stats_no_shared_cpu_policy(self):
@@ -5800,14 +5907,14 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
True,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance2)
+ self.host, self.instance2, {})
self.assertInstanceNUMAcellOrder([0, 1, 3], instance_topology)
CONF.set_override(
'packing_host_numa_cells_allocation_strategy',
False,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance2)
+ self.host, self.instance2, {})
self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
def test_sort_no_pci_stats_shared_cpu_policy(self):
@@ -5816,14 +5923,14 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
True,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertInstanceNUMAcellOrder([0, 1, 2], instance_topology)
CONF.set_override(
'packing_host_numa_cells_allocation_strategy',
False,
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertInstanceNUMAcellOrder([3, 1, 2], instance_topology)
def test_sort_pci_stats_pci_req_no_shared_cpu_policy(self):
@@ -5836,6 +5943,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
pci_reqs = [pci_request]
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 0, 3], instance_topology)
@@ -5845,6 +5953,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 2, 3], instance_topology)
@@ -5859,6 +5968,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
pci_reqs = [pci_request]
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 0, 2], instance_topology)
@@ -5868,6 +5978,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_requests = pci_reqs,
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([1, 3, 2], instance_topology)
@@ -5879,6 +5990,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([0, 3, 2], instance_topology)
CONF.set_override(
@@ -5887,6 +5999,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance2,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
@@ -5897,6 +6010,7 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([0, 2, 3], instance_topology)
CONF.set_override(
@@ -5905,5 +6019,6 @@ class HostCellsSortingTestCase(test.NoDBTestCase):
group = 'compute')
instance_topology = hw.numa_fit_instance_to_host(
self.host, self.instance1,
+ {},
pci_stats = self.pci_stats)
self.assertInstanceNUMAcellOrder([3, 2, 0], instance_topology)
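
The test updates above all add a new third positional argument to hw.numa_fit_instance_to_host(). A minimal sketch of the updated call, based on the hardware.py change later in this diff; the empty dict stands in for the RequestGroup-to-resource-provider mapping used by placement-aware PCI scheduling:

    from nova.virt import hardware as hw

    def fit_without_placement_pci(host_topology, instance_topology):
        # {} means "no per-RequestGroup provider mapping"; passing None
        # instead signals that the InstancePCIRequest objects already
        # carry their own mapping (i.e. we run after scheduling).
        return hw.numa_fit_instance_to_host(
            host_topology, instance_topology, {},
            pci_requests=None, pci_stats=None)
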
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 58581d93ba..62a61c1e8b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -16,6 +16,8 @@ import os
from unittest import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
diff --git a/nova/tests/unit/virt/test_netutils.py b/nova/tests/unit/virt/test_netutils.py
index de3f451351..fa0e16df19 100644
--- a/nova/tests/unit/virt/test_netutils.py
+++ b/nova/tests/unit/virt/test_netutils.py
@@ -17,6 +17,17 @@ from nova.virt import netutils
class TestNetUtilsTestCase(test.NoDBTestCase):
+
+ def _get_fake_instance_nw_info(self, num_networks, dhcp_server, mtu):
+ network_info = fake_network.fake_get_instance_nw_info(self,
+ num_networks)
+ for vif in network_info:
+ for subnet in vif['network']['subnets']:
+ subnet['meta']['dhcp_server'] = dhcp_server
+ vif['network']['meta']['mtu'] = mtu
+
+ return network_info
+
def test_get_cached_vifs_with_vlan_no_nw_info(self):
# Make sure that an empty dictionary will be returned when
# nw_info is None
@@ -39,3 +50,15 @@ class TestNetUtilsTestCase(test.NoDBTestCase):
expected = {'fa:16:3e:d1:28:e4': '2145'}
self.assertEqual(expected,
netutils.get_cached_vifs_with_vlan(network_info))
+
+ def test__get_link_mtu(self):
+ network_info_dhcp = self._get_fake_instance_nw_info(
+ 1, '192.168.0.100', 9000)
+ network_info_no_dhcp = self._get_fake_instance_nw_info(
+ 1, None, 9000)
+
+ for vif in network_info_dhcp:
+ self.assertIsNone(netutils._get_link_mtu(vif))
+
+ for vif in network_info_no_dhcp:
+ self.assertEqual(9000, netutils._get_link_mtu(vif))
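
The new test pins down _get_link_mtu() behaviour; a sketch of the logic it implies (not the real implementation): the MTU is only reported for a VIF when none of its subnets has a DHCP server, since DHCP is expected to advertise the MTU itself.

    def link_mtu_for(vif):
        # Hypothetical re-statement of the behaviour asserted above.
        subnets = vif['network']['subnets']
        if any(subnet['meta'].get('dhcp_server') for subnet in subnets):
            return None
        return vif['network']['meta'].get('mtu')
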
diff --git a/nova/tests/unit/virt/test_node.py b/nova/tests/unit/virt/test_node.py
new file mode 100644
index 0000000000..668b762520
--- /dev/null
+++ b/nova/tests/unit/virt/test_node.py
@@ -0,0 +1,142 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils.fixture import uuidsentinel as uuids
+import testtools
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.virt import node
+
+CONF = cfg.CONF
+
+
+# NOTE(danms): We do not inherit from test.TestCase because we need
+# our node methods not stubbed out in order to exercise them.
+class TestNodeIdentity(testtools.TestCase):
+ def flags(self, **kw):
+ """Override flag variables for a test."""
+ group = kw.pop('group', None)
+ for k, v in kw.items():
+ CONF.set_override(k, v, group)
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.identity_file = os.path.join(self.tempdir, node.COMPUTE_ID_FILE)
+ self.fake_config_files = ['%s/etc/nova.conf' % self.tempdir,
+ '%s/etc/nova/nova.conf' % self.tempdir,
+ '%s/opt/etc/nova/nova.conf' % self.tempdir]
+ for fn in self.fake_config_files:
+ os.makedirs(os.path.dirname(fn))
+ self.flags(state_path=self.tempdir,
+ config_file=self.fake_config_files)
+ node.LOCAL_NODE_UUID = None
+
+ def test_generate_local_node_uuid(self):
+ node_uuid = uuids.node
+ node.write_local_node_uuid(node_uuid)
+
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'anything')
+ self.assertIn(
+ 'Identity file %s appeared unexpectedly' % self.identity_file,
+ str(e))
+
+ def test_generate_local_node_uuid_unexpected_open_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_open.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_generate_local_node_uuid_unexpected_write_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_write = mock_open.return_value.__enter__.return_value.write
+ mock_write.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_get_local_node_uuid_simple_exists(self):
+ node_uuid = uuids.node
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_exists_whitespace(self):
+ node_uuid = uuids.node
+ # Make sure we strip whitespace from the file contents
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ ' %s \n' % node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_generate(self):
+ self.assertIsNone(node.LOCAL_NODE_UUID)
+ node_uuid1 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid1, node.LOCAL_NODE_UUID)
+ node_uuid2 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid2, node.LOCAL_NODE_UUID)
+
+ # Make sure we got the same thing each time, and that it's a
+ # valid uuid. Since we provided no uuid, it must have been
+ # generated the first time and read/returned the second.
+ self.assertEqual(node_uuid1, node_uuid2)
+ uuid.UUID(node_uuid1)
+
+ # Try to read it directly to make sure the file was really
+ # created and with the right value.
+ self.assertEqual(node_uuid1, node.read_local_node_uuid())
+
+ def test_get_local_node_uuid_two(self):
+ node_uuid = uuids.node
+
+ # Write the uuid to two of our locations
+ for cf in (self.fake_config_files[0], self.fake_config_files[1]):
+ open(os.path.join(os.path.dirname(cf),
+ node.COMPUTE_ID_FILE), 'w').write(node_uuid)
+
+ # Make sure we got the expected uuid and that no exceptions
+ # were raised about the files disagreeing
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_two_mismatch(self):
+ node_uuids = [uuids.node1, uuids.node2]
+
+ # Write a different uuid to each file
+ for id, fn in zip(node_uuids, self.fake_config_files):
+ open(os.path.join(
+ os.path.dirname(fn),
+ node.COMPUTE_ID_FILE), 'w').write(id)
+
+ # Make sure we get an error that identifies the mismatching
+ # file with its uuid, as well as what we expected to find
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.get_local_node_uuid)
+ expected = ('UUID %s in %s does not match %s' % (
+ node_uuids[1],
+ os.path.join(os.path.dirname(self.fake_config_files[1]),
+ 'compute_id'),
+ node_uuids[0]))
+ self.assertIn(expected, str(e))
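
Taken together, these tests describe the node-identity helpers added in nova/virt/node.py; a short usage sketch under the same fixtures (the ensure_node_uuid name is ours, not part of the diff):

    from nova.virt import node

    def ensure_node_uuid():
        existing = node.read_local_node_uuid()
        if existing:
            return existing
        # get_local_node_uuid() generates a UUID, persists it to a
        # compute_id file and caches it in node.LOCAL_NODE_UUID, so
        # repeated calls return the same value.
        return node.get_local_node_uuid()
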
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 935af880bc..2d108c6f2d 100644
--- a/nova/tests/unit/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
@@ -102,6 +102,33 @@ class TestVirtDriver(test.NoDBTestCase):
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_RAW])
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_VHD])
+ def test_block_device_info_get_encrypted_disks(self):
+ block_device_info = {
+ 'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
+ 'image': [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ ],
+ 'ephemerals': [
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ {'device_name': '/dev/vdc', 'encrypted': False},
+ ],
+ }
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ ]
+ self.assertEqual(expected, disks)
+ # Try removing 'image'
+ block_device_info.pop('image')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [{'device_name': '/dev/vdb', 'encrypted': True}]
+ self.assertEqual(expected, disks)
+ # Remove 'ephemerals'
+ block_device_info.pop('ephemerals')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ self.assertEqual([], disks)
+
class FakeMount(object):
def __init__(self, image, mount_dir, partition=None, device=None):
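
The helper exercised in this test is added later in this diff in nova/virt/driver.py; a minimal usage sketch with hypothetical values:

    from nova.virt import driver

    block_device_info = {
        'image': [{'device_name': '/dev/vda', 'encrypted': True}],
        'ephemerals': [{'device_name': '/dev/vdb', 'encrypted': False}],
    }
    # Only encrypted 'image' and 'ephemerals' entries are returned; swap
    # and the volume block_device_mapping are not considered.
    encrypted = driver.block_device_info_get_encrypted_disks(block_device_info)
    assert encrypted == [{'device_name': '/dev/vda', 'encrypted': True}]
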
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
index e2a321b50b..ac473c8c09 100644
--- a/nova/tests/unit/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -2123,7 +2123,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 16,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
},
orc.MEMORY_MB: {
'total': 2048,
@@ -2131,7 +2131,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
},
orc.DISK_GB: {
'total': 95,
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 4a41703174..28a866a817 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -227,9 +227,70 @@ class DriverSwapBlockDevice(DriverBlockDevice):
})
+class DriverImageBlockDevice(DriverBlockDevice):
+ _valid_source = 'image'
+ _proxy_as_attr_inherited = set(['image_id'])
+ _new_only_fields = set([
+ 'disk_bus',
+ 'device_type',
+ 'guest_format',
+ 'boot_index',
+ 'encrypted',
+ 'encryption_secret_uuid',
+ 'encryption_format',
+ 'encryption_options'
+ ])
+ _fields = set([
+ 'device_name',
+ 'size']) | _new_only_fields
+ _legacy_fields = (
+ _fields - _new_only_fields | set(['num', 'virtual_name']))
+ _update_on_save = {
+ 'disk_bus': None,
+ 'device_name': None,
+ 'device_type': None,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
+ }
+
+ def _transform(self):
+ if (not self._bdm_obj.get('source_type') == 'image' or
+ not self._bdm_obj.get('destination_type') == 'local'):
+ raise _InvalidType
+ self.update({
+ 'device_name': self._bdm_obj.device_name,
+ 'size': self._bdm_obj.volume_size or 0,
+ 'disk_bus': self._bdm_obj.disk_bus,
+ 'device_type': self._bdm_obj.device_type,
+ 'guest_format': self._bdm_obj.guest_format,
+ 'image_id': self._bdm_obj.image_id,
+ 'boot_index': 0,
+ 'encrypted': self._bdm_obj.encrypted,
+ 'encryption_secret_uuid': self._bdm_obj.encryption_secret_uuid,
+ 'encryption_format': self._bdm_obj.encryption_format,
+ 'encryption_options': self._bdm_obj.encryption_options
+ })
+
+
class DriverEphemeralBlockDevice(DriverBlockDevice):
- _new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
+ _new_only_fields = set([
+ 'disk_bus',
+ 'device_type',
+ 'guest_format',
+ 'encrypted',
+ 'encryption_secret_uuid',
+ 'encryption_format',
+ 'encryption_options'])
_fields = set(['device_name', 'size']) | _new_only_fields
+ _update_on_save = {
+ 'disk_bus': None,
+ 'device_name': None,
+ 'device_type': None,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
+ }
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
@@ -239,7 +300,11 @@ class DriverEphemeralBlockDevice(DriverBlockDevice):
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
- 'guest_format': self._bdm_obj.guest_format
+ 'guest_format': self._bdm_obj.guest_format,
+ 'encrypted': self._bdm_obj.encrypted,
+ 'encryption_secret_uuid': self._bdm_obj.encryption_secret_uuid,
+ 'encryption_format': self._bdm_obj.encryption_format,
+ 'encryption_options': self._bdm_obj.encryption_options
})
@@ -802,15 +867,15 @@ def _convert_block_devices(device_type, block_device_mapping):
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
+convert_local_images = functools.partial(_convert_block_devices,
+ DriverImageBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
-
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
-
convert_snapshots = functools.partial(_convert_block_devices,
DriverVolSnapshotBlockDevice)
@@ -897,9 +962,15 @@ def get_swap(transformed_list):
return None
-_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
- DriverVolumeBlockDevice, DriverVolSnapshotBlockDevice,
- DriverVolImageBlockDevice, DriverVolBlankBlockDevice)
+_IMPLEMENTED_CLASSES = (
+ DriverSwapBlockDevice,
+ DriverEphemeralBlockDevice,
+ DriverVolumeBlockDevice,
+ DriverVolSnapshotBlockDevice,
+ DriverVolImageBlockDevice,
+ DriverVolBlankBlockDevice,
+ DriverImageBlockDevice
+)
def is_implemented(bdm):
@@ -912,6 +983,10 @@ def is_implemented(bdm):
return False
+def is_local_image(bdm):
+ return bdm.source_type == 'image' and bdm.destination_type == 'local'
+
+
def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') and
bdm.destination_type == 'volume' and
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 904ab4af9d..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -20,7 +20,9 @@ Driver base-classes:
types that support that contract
"""
+import itertools
import sys
+import typing as ty
import os_resource_classes as orc
import os_traits
@@ -32,6 +34,7 @@ from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.virt import event as virtevent
+import nova.virt.node
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -44,6 +47,7 @@ def get_block_device_info(instance, block_device_mapping):
of a dict containing the following keys:
- root_device_name: device name of the root disk
+ - image: a (potentially empty) list of DriverImageBlockDevice
+ instances
- ephemerals: a (potentially empty) list of DriverEphemeralBlockDevice
instances
- swap: An instance of DriverSwapBlockDevice or None
@@ -52,18 +56,18 @@ def get_block_device_info(instance, block_device_mapping):
specialized subclasses.
"""
from nova.virt import block_device as virt_block_device
-
- block_device_info = {
+ return {
'root_device_name': instance.root_device_name,
+ 'image': virt_block_device.convert_local_images(
+ block_device_mapping),
'ephemerals': virt_block_device.convert_ephemerals(
block_device_mapping),
'block_device_mapping':
- virt_block_device.convert_all_volumes(*block_device_mapping)
+ virt_block_device.convert_all_volumes(*block_device_mapping),
+ 'swap':
+ virt_block_device.get_swap(
+ virt_block_device.convert_swap(block_device_mapping))
}
- swap_list = virt_block_device.convert_swap(block_device_mapping)
- block_device_info['swap'] = virt_block_device.get_swap(swap_list)
-
- return block_device_info
def block_device_info_get_root_device(block_device_info):
@@ -81,6 +85,14 @@ def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
+def block_device_info_get_image(block_device_info):
+ block_device_info = block_device_info or {}
+ # get_disk_mapping() supports block_device_info=None and thus requires that
+ # we return a list here.
+ image = block_device_info.get('image') or []
+ return image
+
+
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
@@ -93,6 +105,19 @@ def block_device_info_get_mapping(block_device_info):
return block_device_mapping
+def block_device_info_get_encrypted_disks(
+ block_device_info: ty.Mapping[str, ty.Any],
+) -> ty.List['nova.virt.block_device.DriverBlockDevice']:
+ block_device_info = block_device_info or {}
+ return [
+ driver_bdm for driver_bdm in itertools.chain(
+ block_device_info.get('image', []),
+ block_device_info.get('ephemerals', []),
+ )
+ if driver_bdm.get('encrypted')
+ ]
+
+
# NOTE(aspiers): When adding new capabilities, ensure they are
# mirrored in ComputeDriver.capabilities, and that the corresponding
# values should always be standard traits in os_traits. If something
@@ -127,6 +152,11 @@ CAPABILITY_TRAITS_MAP = {
"supports_socket_pci_numa_affinity":
os_traits.COMPUTE_SOCKET_PCI_NUMA_AFFINITY,
"supports_remote_managed_ports": os_traits.COMPUTE_REMOTE_MANAGED_PORTS,
+ "supports_ephemeral_encryption": os_traits.COMPUTE_EPHEMERAL_ENCRYPTION,
+ "supports_ephemeral_encryption_luks":
+ os_traits.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS,
+ "supports_ephemeral_encryption_plain":
+ os_traits.COMPUTE_EPHEMERAL_ENCRYPTION_PLAIN,
}
@@ -197,6 +227,11 @@ class ComputeDriver(object):
"supports_socket_pci_numa_affinity": False,
"supports_remote_managed_ports": False,
+ # Ephemeral encryption support flags
+ "supports_ephemeral_encryption": False,
+ "supports_ephemeral_encryption_luks": False,
+ "supports_ephemeral_encryption_plain": False,
+
# Image type support flags
"supports_image_type_aki": False,
"supports_image_type_ami": False,
@@ -299,7 +334,8 @@ class ComputeDriver(object):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -337,6 +373,7 @@ class ComputeDriver(object):
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
:param accel_uuids: Accelerator UUIDs.
+ :param reimage_boot_volume: Re-image the volume backed instance.
"""
raise NotImplementedError()
@@ -1559,6 +1596,11 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
+
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
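
A sketch of how a caller might consume the new default get_nodenames_by_uuid() (hypothetical helper name; the base implementation simply maps the persistent local node UUID to the driver's first nodename):

    def nodename_for_uuid(virt_driver, node_uuid):
        # Returns None when this driver does not manage the given node.
        return virt_driver.get_nodenames_by_uuid().get(node_uuid)
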
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 02fc1f07bc..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -32,6 +32,7 @@ import fixtures
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
from nova.compute import power_state
@@ -48,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -160,8 +162,8 @@ class FakeDriver(driver.ComputeDriver):
self._host = host
# NOTE(gibi): this is unnecessary complex and fragile but this is
# how many current functional sample tests expect the node name.
- self._nodes = (['fake-mini'] if self._host == 'compute'
- else [self._host])
+ self._set_nodes(['fake-mini'] if self._host == 'compute'
+ else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
@@ -504,6 +506,12 @@ class FakeDriver(driver.ComputeDriver):
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+ # NOTE(danms): Because the fake driver runs on the same host
+ # in tests, potentially with multiple nodes, we need to
+ # control our node uuids. Make sure we return a unique and
+ # consistent uuid for each node we are responsible for, to
+ # keep the persistent local node identity from taking over.
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -646,6 +654,10 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
+
def instance_on_disk(self, instance):
return False
@@ -764,7 +776,7 @@ class PredictableNodeUUIDDriver(SmallFakeDriver):
PredictableNodeUUIDDriver, self).get_available_resource(nodename)
# This is used in ComputeNode.update_from_virt_driver which is called
# from the ResourceTracker when creating a ComputeNode.
- resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename)
+ resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
return resources
@@ -936,7 +948,7 @@ class FakeDriverWithPciResources(SmallFakeDriver):
def setUp(self):
super(FakeDriverWithPciResources.
FakeDriverWithPciResourcesConfigFixture, self).setUp()
- # Set passthrough_whitelist before the compute node starts to match
+ # Set device_spec before the compute node starts to match
# with the PCI devices reported by this fake driver.
# NOTE(gibi): 0000:01:00 is tagged to physnet1 and therefore not a
@@ -951,7 +963,7 @@ class FakeDriverWithPciResources(SmallFakeDriver):
# Having two PFs on the same physnet will allow us to test the
# placement allocation - physical allocation matching based on the
# bandwidth allocation in the future.
- CONF.set_override('passthrough_whitelist', override=[
+ CONF.set_override('device_spec', override=[
jsonutils.dumps(
{
"address": {
@@ -1099,3 +1111,42 @@ class FakeDriverWithCaching(FakeDriver):
else:
self.cached_images.add(image_id)
return True
+
+
+class EphEncryptionDriver(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True)
+
+
+class EphEncryptionDriverLUKS(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True,
+ supports_ephemeral_encryption_luks=True)
+
+
+class EphEncryptionDriverPLAIN(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True,
+ supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+ This behaves like a real virt driver from the perspective of its
+ nodes, with a stable nodename and use of the global node identity
+ stuff to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index c4ebae11ca..96a7198db2 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1337,6 +1337,48 @@ def _get_constraint_mappings_from_flavor(flavor, key, func):
return hw_numa_map or None
+def get_locked_memory_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[bool]:
+ """Validate and return the requested locked memory.
+
+ :param flavor: ``nova.objects.Flavor`` instance
+ :param image_meta: ``nova.objects.ImageMeta`` instance
+ :raises: exception.LockMemoryForbidden if mem_page_size is not set
+ while locked_memory is requested in the image or flavor.
+ :returns: The locked memory flag requested.
+ """
+ mem_page_size_flavor, mem_page_size_image = _get_flavor_image_meta(
+ 'mem_page_size', flavor, image_meta)
+
+ locked_memory_flavor, locked_memory_image = _get_flavor_image_meta(
+ 'locked_memory', flavor, image_meta)
+
+ if locked_memory_flavor is not None:
+ # locked_memory_image is already a boolean
+ locked_memory_flavor = strutils.bool_from_string(locked_memory_flavor)
+
+ if locked_memory_image is not None and (
+ locked_memory_flavor != locked_memory_image
+ ):
+ # We don't allow providing different values in the flavor and image
+ raise exception.FlavorImageLockedMemoryConflict(
+ image=locked_memory_image, flavor=locked_memory_flavor)
+
+ locked_memory = locked_memory_flavor
+
+ else:
+ locked_memory = locked_memory_image
+
+ if locked_memory and not (
+ mem_page_size_flavor or mem_page_size_image
+ ):
+ raise exception.LockMemoryForbidden()
+
+ return locked_memory
+
+
def _get_numa_cpu_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
@@ -2107,6 +2149,8 @@ def numa_get_constraints(flavor, image_meta):
pagesize = _get_numa_pagesize_constraint(flavor, image_meta)
vpmems = get_vpmems(flavor)
+ get_locked_memory_constraint(flavor, image_meta)
+
# If 'hw:cpu_dedicated_mask' is not found in flavor extra specs, the
# 'dedicated_cpus' variable is None, while we hope it being an empty set.
dedicated_cpus = dedicated_cpus or set()
@@ -2251,6 +2295,7 @@ def _numa_cells_support_network_metadata(
def numa_fit_instance_to_host(
host_topology: 'objects.NUMATopology',
instance_topology: 'objects.InstanceNUMATopology',
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
limits: ty.Optional['objects.NUMATopologyLimit'] = None,
pci_requests: ty.Optional['objects.InstancePCIRequests'] = None,
pci_stats: ty.Optional[stats.PciDeviceStats] = None,
@@ -2266,6 +2311,12 @@ def numa_fit_instance_to_host(
:param host_topology: objects.NUMATopology object to fit an
instance on
:param instance_topology: objects.InstanceNUMATopology to be fitted
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resources
+ for that RequestGroup. If it is None then it signals that the
+ InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param limits: objects.NUMATopologyLimits that defines limits
:param pci_requests: instance pci_requests
:param pci_stats: pci_stats for the host
@@ -2357,12 +2408,37 @@ def numa_fit_instance_to_host(
host_cells,
key=lambda cell: total_pci_in_cell.get(cell.id, 0))
+ # a set of host_cell.id, instance_cell.id pairs where we already checked
+ # that the instance cell does not fit
+ not_fit_cache = set()
+ # a set of host_cell.id, instance_cell.id pairs where we already checked
+ # that the instance cell does fit
+ fit_cache = set()
for host_cell_perm in itertools.permutations(
host_cells, len(instance_topology)):
chosen_instance_cells: ty.List['objects.InstanceNUMACell'] = []
chosen_host_cells: ty.List['objects.NUMACell'] = []
for host_cell, instance_cell in zip(
host_cell_perm, instance_topology.cells):
+
+ cell_pair = (host_cell.id, instance_cell.id)
+
+ # if we already checked this pair and they did not fit, there is
+ # no need to check again; just move to the next permutation
+ if cell_pair in not_fit_cache:
+ break
+
+ # if we already checked this pair and they fit before, they will
+ # fit now too, so there is no need to check again; just continue
+ # with the next cell pair in the permutation
+ if cell_pair in fit_cache:
+ chosen_host_cells.append(host_cell)
+ # Normally this would have been done by _numa_fit_instance_cell,
+ # but we optimized that out here based on the cache
+ instance_cell.id = host_cell.id
+ chosen_instance_cells.append(instance_cell)
+ continue
+
try:
cpuset_reserved = 0
if (instance_topology.emulator_threads_isolated and
@@ -2379,17 +2455,24 @@ def numa_fit_instance_to_host(
# This exception will been raised if instance cell's
# custom pagesize is not supported with host cell in
# _numa_cell_supports_pagesize_request function.
+
+ # cache the result
+ not_fit_cache.add(cell_pair)
break
if got_cell is None:
+ # cache the result
+ not_fit_cache.add(cell_pair)
break
chosen_host_cells.append(host_cell)
chosen_instance_cells.append(got_cell)
+ # cache the result
+ fit_cache.add(cell_pair)
if len(chosen_instance_cells) != len(host_cell_perm):
continue
if pci_requests and pci_stats and not pci_stats.support_requests(
- pci_requests, chosen_instance_cells):
+ pci_requests, provider_mapping, chosen_instance_cells):
continue
if network_metadata and not _numa_cells_support_network_metadata(
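
The two caches above avoid re-running _numa_fit_instance_cell for a (host cell, instance cell) pair whose fit result is already known from an earlier permutation. The same idea in isolation, as a generic sketch rather than Nova code:

    import itertools

    def first_fitting_assignment(host_cells, instance_cells, fits):
        known = {}  # (host, inst) -> bool, filled lazily
        for perm in itertools.permutations(host_cells, len(instance_cells)):
            chosen = []
            for host, inst in zip(perm, instance_cells):
                pair = (host, inst)
                if pair not in known:
                    known[pair] = fits(host, inst)
                if not known[pair]:
                    break
                chosen.append(pair)
            else:
                return chosen
        return None
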
@@ -2566,3 +2649,73 @@ def check_hw_rescue_props(image_meta):
"""
hw_rescue_props = ['hw_rescue_device', 'hw_rescue_bus']
return any(key in image_meta.properties for key in hw_rescue_props)
+
+
+def get_ephemeral_encryption_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> bool:
+ """Get the ephemeral encryption constrants based on the flavor and image.
+
+ :param flavor: an objects.Flavor object
+ :param image_meta: an objects.ImageMeta object
+ :raises: nova.exception.FlavorImageConflict
+ :returns: boolean indicating whether encryption of guest ephemeral storage
+ was requested
+ """
+ flavor_eph_encryption_str, image_eph_encryption = _get_flavor_image_meta(
+ 'ephemeral_encryption', flavor, image_meta)
+
+ flavor_eph_encryption = None
+ if flavor_eph_encryption_str is not None:
+ flavor_eph_encryption = strutils.bool_from_string(
+ flavor_eph_encryption_str)
+
+ # Check for conflicts between explicit requirements regarding
+ # ephemeral encryption.
+ # TODO(layrwood): make _check_for_mem_encryption_requirement_conflicts
+ # generic and reuse here
+ if (
+ flavor_eph_encryption is not None and
+ image_eph_encryption is not None and
+ flavor_eph_encryption != image_eph_encryption
+ ):
+ emsg = _(
+ "Flavor %(flavor_name)s has hw:ephemeral_encryption extra spec "
+ "explicitly set to %(flavor_val)s, conflicting with "
+ "image %(image_name)s which has hw_eph_encryption property "
+ "explicitly set to %(image_val)s"
+ )
+ data = {
+ 'flavor_name': flavor.name,
+ 'flavor_val': flavor_eph_encryption_str,
+ 'image_name': image_meta.name,
+ 'image_val': image_eph_encryption,
+ }
+ raise exception.FlavorImageConflict(emsg % data)
+
+ return flavor_eph_encryption or image_eph_encryption
+
+
+def get_ephemeral_encryption_format(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[str]:
+ """Get the ephemeral encryption format.
+
+ :param flavor: an objects.Flavor object
+ :param image_meta: an objects.ImageMeta object
+ :raises: nova.exception.FlavorImageConflict or nova.exception.Invalid
+ :returns: BlockDeviceEncryptionFormatType or None
+ """
+ eph_format = _get_unique_flavor_image_meta(
+ 'ephemeral_encryption_format', flavor, image_meta)
+ if eph_format:
+ if eph_format not in fields.BlockDeviceEncryptionFormatType.ALL:
+ allowed = fields.BlockDeviceEncryptionFormatType.ALL
+ raise exception.Invalid(
+ f"Invalid ephemeral encryption format {eph_format}. "
+ f"Allowed values: {', '.join(allowed)}"
+ )
+ return eph_format
+ return None
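
A sketch of calling the new constraint helpers together; the flavor extra spec names used here (hw:locked_memory, hw:mem_page_size, hw:ephemeral_encryption, hw:ephemeral_encryption_format) and the 'luks' format value are assumptions derived from the lookups and error messages above:

    from nova import objects
    from nova.virt import hardware as hw

    flavor = objects.Flavor(
        vcpus=4, memory_mb=4096,
        extra_specs={
            'hw:mem_page_size': 'large',
            'hw:locked_memory': 'True',
            'hw:ephemeral_encryption': 'True',
            'hw:ephemeral_encryption_format': 'luks',
        })
    image_meta = objects.ImageMeta.from_dict({'properties': {}})

    locked = hw.get_locked_memory_constraint(flavor, image_meta)           # True
    eph_enc = hw.get_ephemeral_encryption_constraint(flavor, image_meta)   # True
    eph_fmt = hw.get_ephemeral_encryption_format(flavor, image_meta)       # 'luks'
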
diff --git a/nova/virt/hyperv/serialproxy.py b/nova/virt/hyperv/serialproxy.py
index 4f8a99dcf6..d12fb8bf6e 100644
--- a/nova/virt/hyperv/serialproxy.py
+++ b/nova/virt/hyperv/serialproxy.py
@@ -46,7 +46,7 @@ class SerialProxy(threading.Thread):
def __init__(self, instance_name, addr, port, input_queue,
output_queue, client_connected):
super(SerialProxy, self).__init__()
- self.setDaemon(True)
+ self.daemon = True
self._instance_name = instance_name
self._addr = addr
@@ -99,7 +99,7 @@ class SerialProxy(threading.Thread):
workers = []
for job in [self._get_data, self._send_data]:
worker = threading.Thread(target=job)
- worker.setDaemon(True)
+ worker.daemon = True
worker.start()
workers.append(worker)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not len(types):
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 1cfb3a8237..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -20,13 +20,13 @@ bare metal resources.
"""
import base64
-from distutils import version
import gzip
import shutil
import tempfile
import time
from urllib import parse as urlparse
+import microversion_parse
from openstack import exceptions as sdk_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -397,6 +397,18 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
+
+ # It's possible this node has just moved from deleting
+ # to cleaning. Placement will update the inventory
+ # as all reserved, but this instance might have got here
+ # before that happened and after the previous allocation
+ # was deleted. We trigger a re-schedule to another node.
+ if (self._node_resources_used(node) or
+ self._node_resources_unavailable(node)):
+ msg = "Chosen ironic node %s is not available" % node_uuid
+ LOG.info(msg, instance=instance)
+ raise exception.ComputeResourcesUnavailable(reason=msg)
+
self._set_instance_id(node, instance)
def failed_spawn_cleanup(self, instance):
@@ -827,6 +839,13 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
@@ -874,15 +893,25 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# nodename is the ironic node's UUID.
node = self._node_from_cache(nodename)
+
reserved = False
- if (not self._node_resources_used(node) and
- self._node_resources_unavailable(node)):
- LOG.debug('Node %(node)s is not ready for a deployment, '
- 'reporting resources as reserved for it. Node\'s '
- 'provision state is %(prov)s, power state is '
- '%(power)s and maintenance is %(maint)s.',
- {'node': node.uuid, 'prov': node.provision_state,
- 'power': node.power_state, 'maint': node.maintenance})
+ if self._node_resources_unavailable(node):
+ # Operators might mark a node as in maintenance
+ # even when an instance is on the node;
+ # either way, let's mark this as reserved
+ reserved = True
+
+ if (self._node_resources_used(node) and
+ not CONF.workarounds.skip_reserve_in_use_ironic_nodes):
+ # Mark the resources as reserved once we have
+ # an instance here.
+ # When the allocation is deleted, automatic cleaning
+ # will most likely start, so we keep the node
+ # reserved until it becomes available again.
+ # In the case without automatic cleaning, once
+ # the allocation is removed in placement the node
+ # also stays reserved until we notice on the next
+ # periodic task that it is actually available.
reserved = True
info = self._node_resource(node)
@@ -1630,7 +1659,8 @@ class IronicDriver(virt_driver.ComputeDriver):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
@@ -1671,7 +1701,13 @@ class IronicDriver(virt_driver.ComputeDriver):
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
:param accel_uuids: Accelerator UUIDs. Ignored by this driver.
+ :param reimage_boot_volume: Re-image the volume backed instance.
"""
+ if reimage_boot_volume:
+ raise exception.NovaException(
+ _("Ironic doesn't support rebuilding volume backed "
+ "instances."))
+
LOG.debug('Rebuild called for instance', instance=instance)
instance.task_state = task_states.REBUILD_SPAWNING
@@ -2067,13 +2103,17 @@ class IronicDriver(virt_driver.ComputeDriver):
if self.ironicclient.is_api_version_negotiated:
current_api_version = self.ironicclient.current_api_version
if (min_version and
- version.StrictVersion(current_api_version) <
- version.StrictVersion(min_version)):
+ microversion_parse.parse_version_string(
+ current_api_version) <
+ microversion_parse.parse_version_string(
+ min_version)):
raise exception.IronicAPIVersionNotAvailable(
version=min_version)
if (max_version and
- version.StrictVersion(current_api_version) >
- version.StrictVersion(max_version)):
+ microversion_parse.parse_version_string(
+ current_api_version) >
+ microversion_parse.parse_version_string(
+ max_version)):
raise exception.IronicAPIVersionNotAvailable(
version=max_version)
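
A minimal sketch of the comparison that replaces distutils.version.StrictVersion (gone along with the deprecated distutils); microversion_parse.parse_version_string returns comparable version objects:

    import microversion_parse

    current = microversion_parse.parse_version_string('1.46')
    minimum = microversion_parse.parse_version_string('1.38')
    assert minimum <= current
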
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index f86a9c461c..4efc6fbaeb 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -73,6 +73,7 @@ import itertools
import operator
from oslo_config import cfg
+from oslo_serialization import jsonutils
from nova import block_device
@@ -400,6 +401,16 @@ def get_info_from_bdm(instance, virt_type, image_meta, bdm,
# NOTE(ndipanov): libvirt starts ordering from 1, not 0
bdm_info['boot_index'] = str(boot_index + 1)
+ # If the device is encrypted pass through the secret, format and options
+ if bdm.get('encrypted'):
+ bdm_info['encrypted'] = bdm.get('encrypted')
+ bdm_info['encryption_secret_uuid'] = bdm.get('encryption_secret_uuid')
+ bdm_info['encryption_format'] = bdm.get('encryption_format')
+ encryption_options = bdm.get('encryption_options')
+ if encryption_options:
+ bdm_info['encryption_options'] = jsonutils.loads(
+ encryption_options)
+
return bdm_info
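
Illustrative shape (hypothetical values) of the extra keys passed through for an encrypted disk; note that encryption_options arrives on the BDM as a JSON string and is decoded here into a dict:

    bdm_info_encryption_extras = {
        'encrypted': True,
        'encryption_secret_uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        'encryption_format': 'luks',
        'encryption_options': {'cipher': 'aes-xts-plain64'},
    }
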
@@ -414,13 +425,7 @@ def get_device_name(bdm):
def get_root_info(instance, virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name=None):
- # NOTE (ndipanov): This is a hack to avoid considering an image
- # BDM with local target, as we don't support them
- # yet. Only applies when passed non-driver format
- no_root_bdm = (not root_bdm or (
- root_bdm.get('source_type') == 'image' and
- root_bdm.get('destination_type') == 'local'))
- if no_root_bdm:
+ if root_bdm is None:
# NOTE(mriedem): In case the image_meta object was constructed from
# an empty dict, like in the case of evacuate, we have to first check
# if disk_format is set on the ImageMeta object.
@@ -452,10 +457,13 @@ def default_device_names(virt_type, context, instance, block_device_info,
image_meta):
get_disk_info(virt_type, instance, image_meta, block_device_info)
- for driver_bdm in itertools.chain(block_device_info['ephemerals'],
- [block_device_info['swap']] if
- block_device_info['swap'] else [],
- block_device_info['block_device_mapping']):
+ for driver_bdm in itertools.chain(
+ block_device_info['image'],
+ block_device_info['ephemerals'],
+ [block_device_info['swap']] if
+ block_device_info['swap'] else [],
+ block_device_info['block_device_mapping']
+ ):
driver_bdm.save()
@@ -563,41 +571,48 @@ def _get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta,
:returns: Disk mapping for the given instance.
"""
mapping = {}
- pre_assigned_device_names = \
- [block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
+
+ driver_bdms = itertools.chain(
+ driver.block_device_info_get_image(block_device_info),
driver.block_device_info_get_ephemerals(block_device_info),
[driver.block_device_info_get_swap(block_device_info)],
- driver.block_device_info_get_mapping(block_device_info))
- if get_device_name(bdm)]
-
- # NOTE (ndipanov): root_bdm can be None when we boot from image
- # as there is no driver representation of local targeted images
- # and they will not be in block_device_info list.
- root_bdm = block_device.get_root_bdm(
- driver.block_device_info_get_mapping(block_device_info))
+ driver.block_device_info_get_mapping(block_device_info)
+ )
+ pre_assigned_device_names = [
+ block_device.strip_dev(get_device_name(bdm))
+ for bdm in driver_bdms if get_device_name(bdm)
+ ]
+
+    # Try to find the root driver bdm, either an image-based disk or a volume
+ root_bdm = None
+ if any(driver.block_device_info_get_image(block_device_info)):
+ root_bdm = driver.block_device_info_get_image(block_device_info)[0]
+ elif driver.block_device_info_get_mapping(block_device_info):
+ root_bdm = block_device.get_root_bdm(
+ driver.block_device_info_get_mapping(block_device_info))
root_device_name = block_device.strip_dev(
driver.block_device_info_get_root_device(block_device_info))
root_info = get_root_info(
instance, virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name)
-
mapping['root'] = root_info
- # NOTE (ndipanov): This implicitly relies on image->local BDMs not
- # being considered in the driver layer - so missing
- # bdm with boot_index 0 means - use image, unless it was
- # overridden. This can happen when using legacy syntax and
- # no root_device_name is set on the instance.
- if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
- block_device_info):
- mapping['disk'] = root_info
- elif root_bdm:
- # NOTE (ft): If device name is not set in root bdm, root_info has a
- # generated one. We have to copy device name to root bdm to prevent its
- # second generation in loop through bdms. If device name is already
- # set, nothing is changed.
+
+ # NOTE (ft): If device name is not set in root bdm, root_info has a
+ # generated one. We have to copy device name to root bdm to prevent its
+ # second generation in loop through bdms. If device name is already
+ # set, nothing is changed.
+    # NOTE(melwitt): root_bdm can be None in the case of an ISO root device, for
+ # example.
+ if root_bdm:
update_bdm(root_bdm, root_info)
+ if (
+ driver.block_device_info_get_image(block_device_info) or
+ root_bdm is None
+ ):
+ mapping['disk'] = root_info
+
default_eph = get_default_ephemeral_info(instance, disk_bus,
block_device_info, mapping)
if default_eph:
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index b5fbe4b094..231283b8dd 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -24,6 +24,7 @@ helpers for populating up config object instances.
"""
import time
+import typing as ty
from collections import OrderedDict
from lxml import etree
@@ -32,6 +33,7 @@ from oslo_utils import units
from nova import exception
from nova.i18n import _
+from nova.objects import fields
from nova.pci import utils as pci_utils
from nova.virt import hardware
@@ -66,9 +68,6 @@ class LibvirtConfigObject(object):
child.text = str(value)
return child
- def get_yes_no_str(self, value):
- return 'yes' if value else 'no'
-
def format_dom(self):
return self._new_node(self.root_name)
@@ -87,6 +86,25 @@ class LibvirtConfigObject(object):
pretty_print=pretty_print)
return xml_str
+ @classmethod
+    def parse_on_off_str(cls, value: ty.Optional[str]) -> bool:
+ if value is not None and value not in ('on', 'off'):
+ msg = _(
+ "Element should contain either 'on' or 'off'; "
+ "found: '%(value)s'"
+ )
+ raise exception.InvalidInput(msg % {'value': value})
+
+ return value == 'on'
+
+ @classmethod
+    def get_yes_no_str(cls, value: bool) -> str:
+ return 'yes' if value else 'no'
+
+ @classmethod
+    def get_on_off_str(cls, value: bool) -> str:
+ return 'on' if value else 'off'
+
def __repr__(self):
return self.to_xml(pretty_print=False)
@@ -1920,6 +1938,8 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
+ elif self.net_type == 'vdpa':
+ self.source_dev = c.get('dev')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')
@@ -2027,6 +2047,12 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
self.keymap = None
self.listen = None
+ self.image_compression = None
+ self.jpeg_compression = None
+ self.zlib_compression = None
+ self.playback_compression = None
+ self.streaming_mode = None
+
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
@@ -2037,6 +2063,24 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
if self.listen:
dev.set("listen", self.listen)
+ if self.type == "spice":
+ if self.image_compression is not None:
+ dev.append(etree.Element(
+ 'image', compression=self.image_compression))
+ if self.jpeg_compression is not None:
+ dev.append(etree.Element(
+ 'jpeg', compression=self.jpeg_compression))
+ if self.zlib_compression is not None:
+ dev.append(etree.Element(
+ 'zlib', compression=self.zlib_compression))
+ if self.playback_compression is not None:
+ dev.append(etree.Element(
+ 'playback', compression=self.get_on_off_str(
+ self.playback_compression)))
+ if self.streaming_mode is not None:
+ dev.append(etree.Element(
+ 'streaming', mode=self.streaming_mode))
+
return dev
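The new SPICE knobs above become child elements of <graphics type="spice">; a standalone sketch of that shape built directly with lxml (the attribute values are examples of valid libvirt settings, not defaults from this change):

    from lxml import etree

    dev = etree.Element('graphics', type='spice')
    dev.append(etree.Element('image', compression='auto_glz'))
    dev.append(etree.Element('jpeg', compression='auto'))
    dev.append(etree.Element('zlib', compression='auto'))
    dev.append(etree.Element('playback', compression='on'))
    dev.append(etree.Element('streaming', mode='filter'))
    print(etree.tostring(dev, pretty_print=True).decode())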
@@ -2733,6 +2777,18 @@ class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
return root
+class LibvirtConfigGuestFeatureIOAPIC(LibvirtConfigGuestFeature):
+
+ def __init__(self, **kwargs):
+ super().__init__("ioapic", **kwargs)
+ self.driver = "qemu"
+
+ def format_dom(self):
+ root = super().format_dom()
+ root.set('driver', self.driver)
+ return root
+
+
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
@@ -2748,6 +2804,15 @@ class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
+ self.vpindex = False
+ self.runtime = False
+ self.synic = False
+ self.reset = False
+ self.frequencies = False
+ self.reenlightenment = False
+ self.tlbflush = False
+ self.ipi = False
+ self.evmcs = False
self.vendorid_spoof = False
self.vendorid = self.SPOOFED_VENDOR_ID
@@ -2764,6 +2829,24 @@ class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
if self.vendorid_spoof:
root.append(etree.Element("vendor_id", state="on",
value=self.vendorid))
+ if self.vpindex:
+ root.append(etree.Element('vpindex', state='on'))
+ if self.runtime:
+ root.append(etree.Element('runtime', state='on'))
+ if self.synic:
+ root.append(etree.Element('synic', state='on'))
+ if self.reset:
+ root.append(etree.Element('reset', state='on'))
+ if self.frequencies:
+ root.append(etree.Element('frequencies', state='on'))
+ if self.reenlightenment:
+ root.append(etree.Element('reenlightenment', state='on'))
+ if self.tlbflush:
+ root.append(etree.Element('tlbflush', state='on'))
+ if self.ipi:
+ root.append(etree.Element('ipi', state='on'))
+ if self.evmcs:
+ root.append(etree.Element('evmcs', state='on'))
return root
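Each Hyper-V enlightenment enabled above is emitted as a child of <hyperv> with state="on"; a standalone sketch of the resulting element, again with plain lxml rather than the config class:

    from lxml import etree

    hv = etree.Element('hyperv')
    for feature in ('vpindex', 'runtime', 'synic', 'reset', 'frequencies',
                    'reenlightenment', 'tlbflush', 'ipi', 'evmcs'):
        hv.append(etree.Element(feature, state='on'))
    print(etree.tostring(hv, pretty_print=True).decode())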
@@ -3061,6 +3144,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
# LibvirtConfigGuestGidMap
# LibvirtConfigGuestCPU
# LibvirtConfigGuestVPMEM
+ # LibvirtConfigGuestIOMMU
for c in xmldoc:
if c.tag == 'devices':
for d in c:
@@ -3088,6 +3172,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
obj = LibvirtConfigGuestVPMEM()
obj.parse_dom(d)
self.devices.append(obj)
+ elif d.tag == 'iommu':
+ obj = LibvirtConfigGuestIOMMU()
+ obj.parse_dom(d)
+ self.devices.append(obj)
if c.tag == 'idmap':
for idmap in c:
obj = None
@@ -3112,7 +3200,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
else:
self._parse_basic_props(c)
- def add_device(self, dev):
+ def add_feature(self, dev: LibvirtConfigGuestFeature) -> None:
+ self.features.append(dev)
+
+ def add_device(self, dev: LibvirtConfigGuestDevice) -> None:
self.devices.append(dev)
def add_perf_event(self, event):
@@ -3315,6 +3406,7 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
+ self.uuid = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
@@ -3324,6 +3416,8 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
+ if c.tag == "uuid":
+ self.uuid = c.text
class LibvirtConfigNodeDeviceVpdCap(LibvirtConfigObject):
@@ -3651,6 +3745,53 @@ class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
self.target_size = sub.text
+class LibvirtConfigGuestIOMMU(LibvirtConfigGuestDevice):
+ """https://libvirt.org/formatdomain.html#iommu-devices"""
+
+ def __init__(self, **kwargs):
+ super().__init__(root_name="iommu", **kwargs)
+
+ self.model: str = fields.VIOMMUModel.AUTO
+ self.interrupt_remapping: bool = False
+ self.caching_mode: bool = False
+ self.eim: bool = False
+ self.iotlb: bool = False
+
+ def format_dom(self):
+ iommu = super().format_dom()
+ iommu.set("model", self.model)
+
+ driver = etree.Element("driver")
+ driver.set("intremap", self.get_on_off_str(self.interrupt_remapping))
+ driver.set("caching_mode", self.get_on_off_str(self.caching_mode))
+
+        # The driver only sets aw_bits when the libvirt version satisfies
+        # MIN_LIBVIRT_VIOMMU_AW_BITS; when the attribute is absent, aw_bits is
+        # not supported and is omitted from the XML.
+ if hasattr(self, "aw_bits"):
+ driver.set("aw_bits", str(self.aw_bits))
+ driver.set("eim", self.get_on_off_str(self.eim))
+ driver.set("iotlb", self.get_on_off_str(self.iotlb))
+ iommu.append(driver)
+
+ return iommu
+
+ def parse_dom(self, xmldoc):
+ super().parse_dom(xmldoc)
+ self.model = xmldoc.get("model")
+
+ driver = xmldoc.find("./driver")
+        if driver is not None:
+ self.interrupt_remapping = self.parse_on_off_str(
+ driver.get("intremap"))
+ self.caching_mode = self.parse_on_off_str(
+ driver.get("caching_mode"))
+ if driver.get("aw_bits") is not None:
+ self.aw_bits = int(driver.get("aw_bits"))
+ self.iotlb = self.parse_on_off_str(driver.get("iotlb"))
+ self.eim = self.parse_on_off_str(driver.get("eim"))
+
+
class LibvirtConfigGuestMetaNovaPorts(LibvirtConfigObject):
def __init__(self, ports=None):
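A minimal usage sketch of the LibvirtConfigGuestIOMMU class added above, assuming the nova tree from this patch is importable; the model and flags are illustrative:

    from nova.virt.libvirt import config as vconfig

    iommu = vconfig.LibvirtConfigGuestIOMMU()
    iommu.model = 'intel'
    iommu.interrupt_remapping = True
    iommu.caching_mode = True
    iommu.eim = True
    iommu.iotlb = True
    # to_xml() is inherited from LibvirtConfigObject and yields roughly:
    #   <iommu model="intel">
    #     <driver intremap="on" caching_mode="on" eim="on" iotlb="on"/>
    #   </iommu>
    print(iommu.to_xml())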
diff --git a/nova/virt/libvirt/cpu/__init__.py b/nova/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..4410a4e579
--- /dev/null
+++ b/nova/virt/libvirt/cpu/__init__.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.libvirt.cpu import api
+
+
+Core = api.Core
+
+
+power_up = api.power_up
+power_down = api.power_down
+validate_all_dedicated_cpus = api.validate_all_dedicated_cpus
+power_down_all_dedicated_cpus = api.power_down_all_dedicated_cpus
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..1c17458d6b
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,157 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova import objects
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+ It may be a physical CPU core or a hardware thread on a shared CPU core
+ depending on if the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ def __str__(self):
+ return str(self.ident)
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
+
+
+def power_up(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_up = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = True
+ else:
+ pcpu.set_high_governor()
+ powered_up.add(str(pcpu))
+ LOG.debug("Cores powered up : %s", powered_up)
+
+
+def power_down(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_down = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ powered_down.add(str(pcpu))
+ LOG.debug("Cores powered down : %s", powered_down)
+
+
+def power_down_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if (CONF.libvirt.cpu_power_management and
+ not CONF.compute.cpu_dedicated_set
+ ):
+        msg = _("'[compute]/cpu_dedicated_set' must be set if "
+                "'[libvirt]/cpu_power_management' is enabled. Please provide "
+                "the CPUs that can be pinned, or disable power management if "
+                "you only use shared CPUs.")
+ raise exception.InvalidConfiguration(msg)
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ LOG.debug("Cores powered down : %s", cpu_dedicated_set)
+
+
+def validate_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ governors = set()
+ cpu_states = set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+        # we need to collect the governor strategies and the CPU states
+ governors.add(pcpu.governor)
+ cpu_states.add(pcpu.online)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ # all the cores need to have the same governor strategy
+ if len(governors) > 1:
+            msg = _("All the cores need to have the same governor strategy "
+                    "before modifying the CPU states. You can reboot the "
+ "compute node if you prefer.")
+ raise exception.InvalidConfiguration(msg)
+ elif CONF.libvirt.cpu_power_management_strategy == 'governor':
+ # all the cores need to be online
+ if False in cpu_states:
+ msg = _("All the cores need to be online before modifying the "
+ "governor strategy.")
+ raise exception.InvalidConfiguration(msg)
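A hedged usage sketch of the new module; it requires a real compute host, privsep, and the [libvirt]cpu_power_management options referenced above, and core id 4 is just an example member of cpu_dedicated_set:

    from nova.virt.libvirt import cpu as libvirt_cpu

    # validate the current governor/online state, then park every dedicated core
    libvirt_cpu.validate_all_dedicated_cpus()
    libvirt_cpu.power_down_all_dedicated_cpus()

    # individual cores can also be driven through the Core dataclass
    core = libvirt_cpu.Core(4)
    if not core.online:
        core.online = True          # privileged write to .../cpu4/online
    print(core.governor)            # e.g. 'performance' or 'powersave'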
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+        raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
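The helpers above are thin wrappers over sysfs; a minimal standalone sketch of the same reads without nova.filesystem (core id 4 and the file contents are examples):

    import os

    CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'

    def read_sys(path: str) -> str:
        # stand-in for nova.filesystem.read_sys
        with open(path) as f:
            return f.read()

    cpu_path = CPU_PATH_TEMPLATE % {'core': 4}
    online = read_sys(os.path.join(cpu_path, 'online')).strip() == '1'
    governor = read_sys(
        os.path.join(cpu_path, 'cpufreq/scaling_governor')).strip()
    print(online, governor)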
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 1eb4109da2..869996f615 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -96,7 +96,6 @@ from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
@@ -115,6 +114,7 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import cpu as libvirt_cpu
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
@@ -221,6 +221,12 @@ MIN_QEMU_VERSION = (4, 2, 0)
NEXT_MIN_LIBVIRT_VERSION = (7, 0, 0)
NEXT_MIN_QEMU_VERSION = (5, 2, 0)
+# vIOMMU driver attribute aw_bits minimal support version.
+MIN_LIBVIRT_VIOMMU_AW_BITS = (6, 5, 0)
+
+# vIOMMU model value `virtio` minimal support version
+MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL = (8, 3, 0)
+
MIN_LIBVIRT_AARCH64_CPU_COMPARE = (6, 9, 0)
# Virtuozzo driver support
@@ -407,6 +413,8 @@ class LibvirtDriver(driver.ComputeDriver):
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
@@ -434,6 +442,10 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
"supports_socket_pci_numa_affinity": True,
+ "supports_ephemeral_encryption":
+ self.image_backend.backend().SUPPORTS_LUKS,
+ "supports_ephemeral_encryption_luks":
+ self.image_backend.backend().SUPPORTS_LUKS,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -458,7 +470,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
@@ -807,6 +818,18 @@ class LibvirtDriver(driver.ComputeDriver):
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
+ # NOTE(sbauza): We verify first if the dedicated CPU performances were
+ # modified by Nova before. Note that it can provide an exception if
+ # either the governor strategies are different between the cores or if
+ # the cores are offline.
+ libvirt_cpu.validate_all_dedicated_cpus()
+ # NOTE(sbauza): We powerdown all dedicated CPUs but if some instances
+ # exist that are pinned for some CPUs, then we'll later powerup those
+ # CPUs when rebooting the instance in _init_instance()
+ # Note that it can provide an exception if the config options are
+ # wrongly modified.
+ libvirt_cpu.power_down_all_dedicated_cpus()
+
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
@@ -979,33 +1002,26 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
- cpu = vconfig.LibvirtConfigGuestCPU()
- for model in models:
- cpu.model = self._get_cpu_model_mapping(model)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured CPU model: %(model)s is not "
- "compatible with host CPU. Please correct your "
- "config and try again. %(e)s") % {
- 'model': model, 'e': e})
- raise exception.InvalidCPUInfo(msg)
-
- # Use guest CPU model to check the compatibility between guest CPU and
- # configured extra_flags
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = self._host.get_capabilities().host.cpu.model
- for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
- cpu_feature = self._prepare_cpu_flag(flag)
- cpu.add_feature(cpu_feature)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured extra flag: %(flag)s it not correct, or "
- "the host CPU does not support this flag. Please "
- "correct the config and try again. %(e)s") % {
- 'flag': flag, 'e': e})
- raise exception.InvalidCPUInfo(msg)
+ if not CONF.workarounds.skip_cpu_compare_at_startup:
+ # Use guest CPU model to check the compatibility between
+ # guest CPU and configured extra_flags
+ for model in models:
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.model = self._get_cpu_model_mapping(model)
+ for flag in set(x.lower() for
+ x in CONF.libvirt.cpu_model_extra_flags):
+ cpu_feature = self._prepare_cpu_flag(flag)
+ cpu.add_feature(cpu_feature)
+ try:
+ self._compare_cpu(cpu, self._get_cpu_info(), None)
+ except exception.InvalidCPUInfo as e:
+ msg = (_("Configured CPU model: %(model)s "
+                        "and CPU flags %(flags)s are not "
+ "compatible with host CPU. Please correct your "
+ "config and try again. %(e)s") % {
+ 'model': model, 'e': e,
+ 'flags': CONF.libvirt.cpu_model_extra_flags})
+ raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
@@ -1509,6 +1525,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
+ # We're sure the instance is gone, we can shutdown the core if so
+ libvirt_cpu.power_down(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, destroy_secrets=True):
@@ -3161,11 +3179,16 @@ class LibvirtDriver(driver.ComputeDriver):
current_power_state = guest.get_power_state(self._host)
+ libvirt_cpu.power_up(instance)
# TODO(stephenfin): Any reason we couldn't use 'self.resume' here?
guest.launch(pause=current_power_state == power_state.PAUSED)
self._attach_pci_devices(
- guest, pci_manager.get_instance_pci_devs(instance))
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._attach_direct_passthrough_ports(context, instance, guest)
def _can_set_admin_password(self, image_meta):
@@ -3241,7 +3264,13 @@ class LibvirtDriver(driver.ComputeDriver):
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': err_msg})
- raise exception.InternalError(msg)
+
+ if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
+ msg += (", libvirt cannot connect to the qemu-guest-agent"
+ " inside the instance.")
+ raise exception.InstanceQuiesceFailed(reason=msg)
+ else:
+ raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
@@ -3279,8 +3308,8 @@ class LibvirtDriver(driver.ComputeDriver):
format=source_format,
basename=False)
disk_delta = out_path + '.delta'
- libvirt_utils.create_cow_image(src_back_path, disk_delta,
- src_disk_size)
+ libvirt_utils.create_image(
+ disk_delta, 'qcow2', src_disk_size, backing_file=src_back_path)
try:
self._can_quiesce(instance, image_meta)
@@ -4101,8 +4130,12 @@ class LibvirtDriver(driver.ComputeDriver):
"""Suspend the specified instance."""
guest = self._host.get_guest(instance)
- self._detach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
+ self._detach_pci_devices(
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._detach_direct_passthrough_ports(context, instance, guest)
self._detach_mediated_devices(guest)
guest.save_memory_state()
@@ -4120,8 +4153,12 @@ class LibvirtDriver(driver.ComputeDriver):
guest = self._create_guest_with_network(
context, xml, instance, network_info, block_device_info,
vifs_already_plugged=True)
- self._attach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
+ self._attach_pci_devices(
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._attach_direct_passthrough_ports(
context, instance, guest, network_info)
self._attach_mediated_devices(guest, mdevs)
@@ -4506,7 +4543,7 @@ class LibvirtDriver(driver.ComputeDriver):
'%dG' % ephemeral_size,
specified_fs)
return
- libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
+ libvirt_utils.create_image(target, 'raw', f'{ephemeral_size}G')
# Run as root only for block devices.
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
@@ -4515,7 +4552,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb, context=None):
"""Create a swap file of specified size."""
- libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
+ libvirt_utils.create_image(target, 'raw', f'{swap_mb}M')
nova.privsep.fs.unprivileged_mkfs('swap', target)
@staticmethod
@@ -4638,12 +4675,16 @@ class LibvirtDriver(driver.ComputeDriver):
ignore_bdi_for_swap=False):
booted_from_volume = self._is_booted_from_volume(block_device_info)
- def image(fname, image_type=CONF.libvirt.images_type):
- return self.image_backend.by_name(instance,
- fname + suffix, image_type)
+ def image(
+ fname, image_type=CONF.libvirt.images_type, disk_info_mapping=None
+ ):
+ return self.image_backend.by_name(
+ instance, fname + suffix, image_type,
+ disk_info_mapping=disk_info_mapping)
- def raw(fname):
- return image(fname, image_type='raw')
+ def raw(fname, disk_info_mapping=None):
+ return image(
+ fname, image_type='raw', disk_info_mapping=disk_info_mapping)
created_instance_dir = True
@@ -4657,13 +4698,11 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("Creating instance directory", instance=instance)
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
- LOG.info('Creating image', instance=instance)
+ LOG.info('Creating image(s)', instance=instance)
flavor = instance.get_flavor()
swap_mb = 0
if 'disk.swap' in disk_mapping:
- mapping = disk_mapping['disk.swap']
-
if ignore_bdi_for_swap:
# This is a workaround to support legacy swap resizing,
# which does not touch swap size specified in bdm,
@@ -4677,12 +4716,17 @@ class LibvirtDriver(driver.ComputeDriver):
# leaving the work with bdm only.
swap_mb = flavor['swap']
else:
+ disk_info_mapping = disk_mapping['disk.swap']
+ disk_device = disk_info_mapping['dev']
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
- elif (flavor['swap'] > 0 and
- not block_device.volume_in_mapping(
- mapping['dev'], block_device_info)):
+ elif (
+ flavor['swap'] > 0 and
+ not block_device.volume_in_mapping(
+ disk_device, block_device_info,
+ )
+ ):
swap_mb = flavor['swap']
if swap_mb > 0:
@@ -4715,8 +4759,8 @@ class LibvirtDriver(driver.ComputeDriver):
image_id=disk_images['ramdisk_id'])
created_disks = self._create_and_inject_local_root(
- context, instance, booted_from_volume, suffix, disk_images,
- injection_info, fallback_from_host)
+ context, instance, disk_mapping, booted_from_volume, suffix,
+ disk_images, injection_info, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = nova.privsep.fs.get_fs_type_for_os_type(
@@ -4729,7 +4773,9 @@ class LibvirtDriver(driver.ComputeDriver):
vm_mode = fields.VMMode.get_from_instance(instance)
ephemeral_gb = instance.flavor.ephemeral_gb
if 'disk.local' in disk_mapping:
- disk_image = image('disk.local')
+ disk_info_mapping = disk_mapping['disk.local']
+ disk_image = image(
+ 'disk.local', disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4748,7 +4794,9 @@ class LibvirtDriver(driver.ComputeDriver):
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
- disk_image = image(blockinfo.get_eph_disk(idx))
+ disk_name = blockinfo.get_eph_disk(idx)
+ disk_info_mapping = disk_mapping[disk_name]
+ disk_image = image(disk_name, disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4787,7 +4835,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (created_instance_dir, created_disks)
- def _create_and_inject_local_root(self, context, instance,
+ def _create_and_inject_local_root(self, context, instance, disk_mapping,
booted_from_volume, suffix, disk_images,
injection_info, fallback_from_host):
created_disks = False
@@ -4797,9 +4845,6 @@ class LibvirtDriver(driver.ComputeDriver):
injection_info is not None and
CONF.libvirt.inject_partition != -2)
- # NOTE(ndipanov): Even if disk_mapping was passed in, which
- # currently happens only on rescue - we still don't want to
- # create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images['image_id'])
size = instance.flavor.root_gb * units.Gi
@@ -4807,8 +4852,10 @@ class LibvirtDriver(driver.ComputeDriver):
if size == 0 or suffix == '.rescue':
size = None
- backend = self.image_backend.by_name(instance, 'disk' + suffix,
- CONF.libvirt.images_type)
+ disk_name = 'disk' + suffix
+ disk_info_mapping = disk_mapping[disk_name]
+ backend = self.image_backend.by_name(
+ instance, disk_name, disk_info_mapping=disk_info_mapping)
created_disks = not backend.exists()
if instance.task_state == task_states.RESIZE_FINISH:
@@ -4986,16 +5033,18 @@ class LibvirtDriver(driver.ComputeDriver):
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
- for hdev in [d for d in guest_config.devices
- if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
+ for hdev in [
+ d for d in guest_config.devices
+ if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)
+ ]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev.address)
- if [int(x, 16) for x in hdbsf] ==\
- [int(x, 16) for x in dbsf]:
- raise exception.PciDeviceDetachFailed(reason=
- "timeout",
- dev=dev)
-
+ if (
+ [int(x, 16) for x in hdbsf] ==
+ [int(x, 16) for x in dbsf]
+ ):
+ raise exception.PciDeviceDetachFailed(
+ reason="timeout", dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
@@ -5043,33 +5092,76 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
guest.attach_device(cfg)
+    # TODO(sean-k-mooney): we should try to converge this function with
+    # _detach_direct_passthrough_vifs, which does the same operation correctly
+    # for live migration
def _detach_direct_passthrough_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
- # In case of VNIC_TYPES_DIRECT_PASSTHROUGH ports we create
- # pci request per direct passthrough port. Therefore we can trust
- # that pci_slot value in the vif is correct.
- direct_passthrough_pci_addresses = [
+
+ attached_via_hostdev_element = []
+ attached_via_interface_element = []
+
+ for vif in network_info:
+ if vif['profile'].get('pci_slot') is None:
+                    # this is not an SR-IOV interface, so skip it
+ continue
+
+ if (vif['vnic_type'] not in
+ network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
+ continue
+
+ cfg = self.vif_driver.get_config(
+ instance, vif, instance.image_meta, instance.flavor,
+ CONF.libvirt.virt_type)
+ LOG.debug(f'Detaching type: {type(cfg)}, data: {cfg}')
+ if isinstance(cfg, vconfig.LibvirtConfigGuestHostdevPCI):
+ attached_via_hostdev_element.append(vif)
+ else:
+ attached_via_interface_element.append(vif)
+
+ pci_devs = instance.get_pci_devices()
+ hostdev_pci_addresses = {
vif['profile']['pci_slot']
- for vif in network_info
- if (vif['vnic_type'] in
- network_model.VNIC_TYPES_DIRECT_PASSTHROUGH and
- vif['profile'].get('pci_slot') is not None)
+ for vif in attached_via_hostdev_element
+ }
+ direct_passthrough_pci_addresses = [
+ pci_dev for pci_dev in pci_devs
+ if pci_dev.address in hostdev_pci_addresses
]
- # use detach_pci_devices to avoid failure in case of
- # multiple guest direct passthrough ports with the same MAC
- # (protection use-case, ports are on different physical
- # interfaces)
- pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
- direct_passthrough_pci_addresses = (
- [pci_dev for pci_dev in pci_devs
- if pci_dev.address in direct_passthrough_pci_addresses])
+            # FIXME(sean-k-mooney): I am using _detach_pci_devices because of
+            # the previous comment introduced by change-id:
+            # I3a45b1fb41e8e446d1f25d7a1d77991c8bf2a1ed
+            # in relation to bug 1563874, however I'm not convinced that patch
+            # was correct so we should reevaluate whether we should do this.
+            # The intent of using _detach_pci_devices is to cater for the use
+            # case where multiple ports have the same MAC address; however,
+            # _detach_pci_devices can only remove devices that are attached as
+            # hostdev elements, not those attached via the interface element.
+            # Using it for all devices would therefore break vnic_type direct
+            # when using the sriov_nic_agent ml2 driver, or VIFs of vnic_type
+            # vdpa. Since PF ports can't have the same MAC, that means this
+            # use case was for hardware-offloaded OVS? Many NICs do not allow
+            # two VFs to have the same MAC on different VLANs due to the
+            # ordering of the VLAN and MAC filters in their static packet
+            # processing pipeline, so it is unclear if this will work in any
+            # non-OVS-offload case. We should look into this more closely, as
+            # from my testing with this patch we appear to use the interface
+            # element for hardware-offloaded OVS too. Infiniband and
+            # vnic_type=direct-physical ports do need this code path, and
+            # neither of those can have duplicate MACs...
self._detach_pci_devices(guest, direct_passthrough_pci_addresses)
+ # for ports that are attached with interface elements we cannot use
+ # _detach_pci_devices so we use detach_interface
+ for vif in attached_via_interface_element:
+ self.detach_interface(context, instance, vif)
+
def _update_compute_provider_status(self, context, service):
"""Calls the ComputeVirtAPI.update_compute_provider_status method
@@ -5360,8 +5452,15 @@ class LibvirtDriver(driver.ComputeDriver):
self, instance, name, disk_mapping, flavor, image_type=None,
boot_order=None,
):
+ # NOTE(artom) To pass unit tests, wherein the code here is loaded
+ # *before* any config with self.flags() is done, we need to have the
+ # default inline in the method, and not in the kwarg declaration.
+ if image_type is None:
+ image_type = CONF.libvirt.images_type
disk_unit = None
- disk = self.image_backend.by_name(instance, name, image_type)
+ disk_info_mapping = disk_mapping[name]
+ disk = self.image_backend.by_name(
+ instance, name, image_type, disk_info_mapping=disk_info_mapping)
if (name == 'disk.config' and image_type == 'rbd' and
not disk.exists()):
# This is likely an older config drive that has not been migrated
@@ -5370,21 +5469,26 @@ class LibvirtDriver(driver.ComputeDriver):
# remove this fall back once we know all config drives are in rbd.
# NOTE(vladikr): make sure that the flat image exist, otherwise
# the image will be created after the domain definition.
- flat_disk = self.image_backend.by_name(instance, name, 'flat')
+ flat_disk = self.image_backend.by_name(
+ instance, name, 'flat', disk_info_mapping=disk_info_mapping)
if flat_disk.exists():
disk = flat_disk
LOG.debug('Config drive not found in RBD, falling back to the '
'instance directory', instance=instance)
- disk_info = disk_mapping[name]
- if 'unit' in disk_mapping and disk_info['bus'] == 'scsi':
+ # The 'unit' key is global to the disk_mapping (rather than for an
+ # individual disk) because it is used solely to track the incrementing
+ # unit number.
+ if 'unit' in disk_mapping and disk_info_mapping['bus'] == 'scsi':
disk_unit = disk_mapping['unit']
- disk_mapping['unit'] += 1 # Increments for the next disk added
+ disk_mapping['unit'] += 1 # Increments for the next disk
conf = disk.libvirt_info(
- disk_info, self.disk_cachemode, flavor['extra_specs'],
- disk_unit=disk_unit, boot_order=boot_order)
+ self.disk_cachemode, flavor['extra_specs'], disk_unit=disk_unit,
+ boot_order=boot_order)
return conf
- def _get_guest_fs_config(self, instance, name, image_type=None):
+ def _get_guest_fs_config(
+ self, instance, name, image_type=CONF.libvirt.images_type
+ ):
disk = self.image_backend.by_name(instance, name, image_type)
return disk.libvirt_fs_info("/", "ploop")
@@ -6067,9 +6171,9 @@ class LibvirtDriver(driver.ComputeDriver):
image_meta.properties.get('img_hide_hypervisor_id'))
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
- guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
+ guest.add_feature(vconfig.LibvirtConfigGuestFeatureACPI())
if not CONF.workarounds.libvirt_disable_apic:
- guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
+ guest.add_feature(vconfig.LibvirtConfigGuestFeatureAPIC())
if CONF.libvirt.virt_type in ('qemu', 'kvm') and os_type == 'windows':
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
@@ -6081,6 +6185,15 @@ class LibvirtDriver(driver.ComputeDriver):
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
+ hv.vpindex = True
+ hv.runtime = True
+ hv.synic = True
+ hv.reset = True
+ hv.frequencies = True
+ hv.reenlightenment = True
+ hv.tlbflush = True
+ hv.ipi = True
+ hv.evmcs = True
# NOTE(kosamara): Spoofing the vendor_id aims to allow the nvidia
# driver to work on windows VMs. At the moment, the nvidia driver
@@ -6104,16 +6217,16 @@ class LibvirtDriver(driver.ComputeDriver):
fields.Architecture.I686, fields.Architecture.X86_64,
fields.Architecture.AARCH64,
):
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeatureVMCoreInfo())
if hide_hypervisor_id:
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeatureKvmHidden())
pmu = hardware.get_pmu_constraint(flavor, image_meta)
if pmu is not None:
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeaturePMU(pmu))
def _check_number_of_serial_console(self, num_ports):
@@ -6330,6 +6443,11 @@ class LibvirtDriver(driver.ComputeDriver):
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
+ if hardware.get_locked_memory_constraint(flavor, image_meta):
+ if not membacking:
+ membacking = vconfig.LibvirtConfigGuestMemoryBacking()
+ membacking.locked = True
+
return membacking
def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
@@ -6590,18 +6708,26 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_consoles_qemu_kvm(
guest_cfg, instance, flavor, image_meta)
- def _is_mipsel_guest(self, image_meta):
+ def _is_mipsel_guest(self, image_meta: 'objects.ImageMeta') -> bool:
archs = (fields.Architecture.MIPSEL, fields.Architecture.MIPS64EL)
return self._check_emulation_arch(image_meta) in archs
- def _is_s390x_guest(self, image_meta):
+ def _is_s390x_guest(self, image_meta: 'objects.ImageMeta') -> bool:
archs = (fields.Architecture.S390, fields.Architecture.S390X)
return self._check_emulation_arch(image_meta) in archs
- def _is_ppc64_guest(self, image_meta):
+ def _is_ppc64_guest(self, image_meta: 'objects.ImageMeta') -> bool:
archs = (fields.Architecture.PPC64, fields.Architecture.PPC64LE)
return self._check_emulation_arch(image_meta) in archs
+ def _is_aarch64_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ arch = fields.Architecture.AARCH64
+ return self._check_emulation_arch(image_meta) == arch
+
+ def _is_x86_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.I686, fields.Architecture.X86_64)
+ return self._check_emulation_arch(image_meta) in archs
+
def _create_consoles_qemu_kvm(self, guest_cfg, instance, flavor,
image_meta):
char_dev_cls = vconfig.LibvirtConfigGuestSerial
@@ -6979,6 +7105,8 @@ class LibvirtDriver(driver.ComputeDriver):
if vpmems:
self._guest_add_vpmems(guest, vpmems)
+ self._guest_add_iommu_device(guest, image_meta, flavor)
+
return guest
def _get_ordered_vpmems(self, instance, flavor):
@@ -7143,11 +7271,13 @@ class LibvirtDriver(driver.ComputeDriver):
def _guest_add_pci_devices(self, guest, instance):
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
# Get all generic PCI devices (non-SR-IOV).
- for pci_dev in pci_manager.get_instance_pci_devs(instance):
+ for pci_dev in instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
# PCI devices is only supported for QEMU/KVM hypervisor
- if pci_manager.get_instance_pci_devs(instance, 'all'):
+ if instance.get_pci_devices():
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt.virt_type
)
@@ -7186,6 +7316,11 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.listen = CONF.spice.server_listen
+ graphics.image_compression = CONF.spice.image_compression
+ graphics.jpeg_compression = CONF.spice.jpeg_compression
+ graphics.zlib_compression = CONF.spice.zlib_compression
+ graphics.playback_compression = CONF.spice.playback_compression
+ graphics.streaming_mode = CONF.spice.streaming_mode
guest.add_device(graphics)
add_video_driver = True
@@ -7284,6 +7419,92 @@ class LibvirtDriver(driver.ComputeDriver):
# returned for unit testing purposes
return keyboard
+ def _get_iommu_model(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: 'objects.ImageMeta',
+ flavor: 'objects.Flavor',
+ ) -> ty.Optional[str]:
+ model = flavor.extra_specs.get(
+ 'hw:viommu_model') or image_meta.properties.get(
+ 'hw_viommu_model')
+ if not model:
+ return None
+
+ is_x86 = self._is_x86_guest(image_meta)
+ is_aarch64 = self._is_aarch64_guest(image_meta)
+
+ if is_x86:
+ if guest.os_mach_type is not None and not (
+ 'q35' in guest.os_mach_type
+ ):
+ arch = self._check_emulation_arch(image_meta)
+ mtype = guest.os_mach_type if (
+ guest.os_mach_type is not None
+ ) else "unknown"
+ raise exception.InvalidVIOMMUMachineType(
+ mtype=mtype, arch=arch)
+ elif is_aarch64:
+ if guest.os_mach_type is not None and not (
+ 'virt' in guest.os_mach_type
+ ):
+ arch = self._check_emulation_arch(image_meta)
+ mtype = guest.os_mach_type if (
+ guest.os_mach_type is not None
+ ) else "unknown"
+ raise exception.InvalidVIOMMUMachineType(
+ mtype=mtype, arch=arch)
+ else:
+ raise exception.InvalidVIOMMUArchitecture(
+ arch=self._check_emulation_arch(image_meta))
+
+ if model == fields.VIOMMUModel.AUTO:
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
+ model = fields.VIOMMUModel.VIRTIO
+ elif self._is_x86_guest(image_meta) and (
+ guest.os_mach_type is not None and 'q35' in guest.os_mach_type
+ ):
+ model = fields.VIOMMUModel.INTEL
+ else:
+ # AArch64
+ model = fields.VIOMMUModel.SMMUV3
+ return model
+
+ def _guest_add_iommu_device(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: 'objects.ImageMeta',
+ flavor: 'objects.Flavor',
+ ) -> None:
+ """Add a virtual IOMMU device to allow e.g. vfio-pci usage."""
+ if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
+ # vIOMMU requires QEMU
+ return
+
+ iommu = vconfig.LibvirtConfigGuestIOMMU()
+
+ iommu.model = self._get_iommu_model(guest, image_meta, flavor)
+ if iommu.model is None:
+ return
+
+ iommu.interrupt_remapping = True
+ iommu.caching_mode = True
+ iommu.iotlb = True
+
+        # As QEMU supports values of 39 and 48, we default to the larger
+        # width (48) and do not expose this to the end user.
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_AW_BITS):
+ iommu.aw_bits = 48
+
+ if guest.os_mach_type is not None and 'q35' in guest.os_mach_type:
+ iommu.eim = True
+ else:
+ iommu.eim = False
+ guest.add_device(iommu)
+
+ ioapic = vconfig.LibvirtConfigGuestFeatureIOAPIC()
+ guest.add_feature(ioapic)
+
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None,
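Condensed, the 'auto' resolution in _get_iommu_model above falls back as shown in this sketch, written with plain strings instead of the fields.VIOMMUModel constants:

    def resolve_auto_model(has_virtio_support: bool, is_x86_q35: bool) -> str:
        # virtio is preferred when libvirt is new enough, otherwise pick the
        # architecture-specific model
        if has_virtio_support:
            return 'virtio'
        if is_x86_q35:
            return 'intel'
        return 'smmuv3'  # AArch64 'virt' machine type

    assert resolve_auto_model(False, True) == 'intel'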
@@ -7441,6 +7662,7 @@ class LibvirtDriver(driver.ComputeDriver):
post_xml_callback()
if power_on or pause:
+ libvirt_cpu.power_up(instance)
guest.launch(pause=pause)
return guest
@@ -7545,15 +7767,18 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.compute.cpu_dedicated_set:
return set()
- online_cpus = self._host.get_online_cpus()
+ if CONF.libvirt.cpu_power_management:
+ available_cpus = self._host.get_available_cpus()
+ else:
+ available_cpus = self._host.get_online_cpus()
dedicated_cpus = hardware.get_cpu_dedicated_set()
- if not dedicated_cpus.issubset(online_cpus):
+ if not dedicated_cpus.issubset(available_cpus):
msg = _("Invalid '[compute] cpu_dedicated_set' config: one or "
- "more of the configured CPUs is not online. Online "
- "cpuset(s): %(online)s, configured cpuset(s): %(req)s")
+ "more of the configured CPUs is not available. Available "
+ "cpuset(s): %(available)s, configured cpuset(s): %(req)s")
raise exception.Invalid(msg % {
- 'online': sorted(online_cpus),
+ 'available': sorted(available_cpus),
'req': sorted(dedicated_cpus)})
return dedicated_cpus
@@ -8031,15 +8256,52 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_mediated_device_information(self, devname):
"""Returns a dict of a mediated device."""
- virtdev = self._host.device_lookup_by_name(devname)
+ # LP #1951656 - In Libvirt 7.7, the mdev name now includes the PCI
+        # address of the parent device (e.g. mdev_<uuid>_<pci_address>)
+        # because mdevctl allows multiple mediated devices with the same UUID
+        # to be defined (only one can be active at a time). Since the guest
+        # information doesn't have the parent ID, try to look up which
+        # available mediated device matches the UUID. If multiple devices
+        # match the UUID, then this is an error condition.
+ try:
+ virtdev = self._host.device_lookup_by_name(devname)
+ except libvirt.libvirtError as ex:
+ if ex.get_error_code() != libvirt.VIR_ERR_NO_NODE_DEVICE:
+ raise
+ mdevs = [dev for dev in self._host.list_mediated_devices()
+ if dev.startswith(devname)]
+ # If no matching devices are found, simply raise the original
+ # exception indicating that no devices are found.
+ if not mdevs:
+ raise
+ elif len(mdevs) > 1:
+ msg = ("The mediated device name %(devname)s refers to a UUID "
+ "that is present in multiple libvirt mediated devices. "
+ "Matching libvirt mediated devices are %(devices)s. "
+ "Mediated device UUIDs must be unique for Nova." %
+ {'devname': devname,
+ 'devices': ', '.join(mdevs)})
+ raise exception.InvalidLibvirtMdevConfig(reason=msg)
+
+ LOG.debug('Found requested device %s as %s. Using that.',
+ devname, mdevs[0])
+ virtdev = self._host.device_lookup_by_name(mdevs[0])
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
+ # Starting with Libvirt 7.3, the uuid information is available in the
+        # node device information. If it's there, use that. Otherwise,
+ # fall back to the previous behavior of parsing the uuid from the
+ # devname.
+ if cfgdev.mdev_information.uuid:
+ mdev_uuid = cfgdev.mdev_information.uuid
+ else:
+ mdev_uuid = libvirt_utils.mdev_name2uuid(cfgdev.name)
device = {
"dev_id": cfgdev.name,
- # name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4
- "uuid": libvirt_utils.mdev_name2uuid(cfgdev.name),
+ "uuid": mdev_uuid,
# the physical GPU PCI device
"parent": cfgdev.parent,
"type": cfgdev.mdev_information.type,
@@ -8127,6 +8389,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
+ LOG.debug('Searching for available mdevs...')
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
available_mdevs = set()
@@ -8142,6 +8405,7 @@ class LibvirtDriver(driver.ComputeDriver):
available_mdevs.add(mdev["uuid"])
available_mdevs -= set(allocated_mdevs)
+        LOG.info('Available mdevs: %s.', available_mdevs)
return available_mdevs
def _create_new_mediated_device(self, parent, uuid=None):
@@ -8153,6 +8417,7 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: the newly created mdev UUID or None if not possible
"""
+ LOG.debug('Attempting to create new mdev...')
supported_types = self.supported_vgpu_types
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(supported_types)
@@ -8164,6 +8429,7 @@ class LibvirtDriver(driver.ComputeDriver):
# The device is not the one that was called, not creating
# the mdev
continue
+ LOG.debug('Trying on: %s.', dev_name)
dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name)
if dev_supported_type and device['types'][
dev_supported_type]['availableInstances'] > 0:
@@ -8173,7 +8439,13 @@ class LibvirtDriver(driver.ComputeDriver):
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
chosen_mdev = nova.privsep.libvirt.create_mdev(
pci_addr, dev_supported_type, uuid=uuid)
+ LOG.info('Created mdev: %s on pGPU: %s.',
+ chosen_mdev, pci_addr)
return chosen_mdev
+ LOG.debug('Failed: No available instances on device.')
+ LOG.info('Failed to create mdev. '
+ 'No free space found among the following devices: %s.',
+ [dev['dev_id'] for dev in devices])
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
@@ -8256,6 +8528,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
+                LOG.debug('No available mdevs were found. '
+                          'Creating a new one...')
chosen_mdev = self._create_new_mediated_device(parent_device)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
@@ -8263,6 +8537,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason='mdev-capable resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
+ LOG.info('Allocated mdev: %s.', chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
@@ -8739,6 +9014,7 @@ class LibvirtDriver(driver.ComputeDriver):
traits.update(self._get_storage_bus_traits())
traits.update(self._get_video_model_traits())
traits.update(self._get_vif_model_traits())
+ traits.update(self._get_iommu_model_traits())
traits.update(self._get_tpm_traits())
_, invalid_traits = ot.check_traits(traits)
@@ -9264,6 +9540,7 @@ class LibvirtDriver(driver.ComputeDriver):
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
+ data["uuid"] = self._host.get_node_uuid()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
@@ -9717,7 +9994,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
- ret = self._host.compare_cpu(cpu_xml)
+ ret = self._host.compare_hypervisor_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
@@ -9809,6 +10086,24 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: instance object that is in migration
"""
+ current = eventlet.getcurrent()
+        # NOTE(gibi) not all eventlet spawns are under our control, so
+        # there can be senders without test_case_id set; find the first
+        # ancestor that was spawned from nova.utils.spawn[_n] and
+        # therefore has the id set.
+ while (
+ current is not None and
+ not getattr(current, 'test_case_id', None)
+ ):
+ current = current.parent
+
+ if current is not None:
+ LOG.warning(
+ "!!!---!!! live_migration_abort thread was spawned by "
+ "TestCase ID: %s. If you see this in a failed functional test "
+ "then please let #openstack-nova on IRC know about it. "
+ "!!!---!!!", current.test_case_id
+ )
guest = self._host.get_guest(instance)
dom = guest._domain
@@ -10718,14 +11013,13 @@ class LibvirtDriver(driver.ComputeDriver):
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
- libvirt_utils.create_image(info['type'], instance_disk,
- info['virt_disk_size'])
+ libvirt_utils.create_image(
+ instance_disk, info['type'], info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
- disk = self.image_backend.by_name(instance, instance_disk,
- CONF.libvirt.images_type)
+ disk = self.image_backend.by_name(instance, instance_disk)
if cache_name.startswith('ephemeral'):
# The argument 'size' is used by image.cache to
# validate disk size retrieved from cache against
@@ -10801,16 +11095,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.workarounds.enable_qemu_monitor_announce_self:
return
- LOG.info('Sending announce-self command to QEMU monitor',
- instance=instance)
+ current_attempt = 0
- try:
- guest = self._host.get_guest(instance)
- guest.announce_self()
- except Exception:
- LOG.warning('Failed to send announce-self command to QEMU monitor',
- instance=instance)
- LOG.exception()
+ max_attempts = (
+ CONF.workarounds.qemu_monitor_announce_self_count)
+        # qemu_monitor_announce_self_interval is specified in seconds
+ announce_pause = (
+ CONF.workarounds.qemu_monitor_announce_self_interval)
+
+        while current_attempt < max_attempts:
+ # Increment attempt
+ current_attempt += 1
+
+ # Only use announce_pause after the first attempt to avoid
+ # pausing before calling announce_self for the first attempt
+ if current_attempt != 1:
+ greenthread.sleep(announce_pause)
+
+ LOG.info('Sending announce-self command to QEMU monitor. '
+ 'Attempt %(current_attempt)s of %(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ try:
+ guest = self._host.get_guest(instance)
+ guest.announce_self()
+ except Exception:
+ LOG.warning('Failed to send announce-self command to '
+ 'QEMU monitor. Attempt %(current_attempt)s of '
+ '%(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ LOG.exception()
def post_live_migration_at_destination(self, context,
instance,
@@ -11060,6 +11375,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
+
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
@@ -12016,6 +12334,30 @@ class LibvirtDriver(driver.ComputeDriver):
in supported_models for model in all_models
}
+ def _get_iommu_model_traits(self) -> ty.Dict[str, bool]:
+ """Get iommu model traits based on the currently enabled virt_type.
+ Not all traits generated by this function may be valid and the result
+ should be validated.
+ :return: A dict of trait names mapped to boolean values.
+ """
+ dom_caps = self._host.get_domain_capabilities()
+ supported_models: ty.Set[str] = {fields.VIOMMUModel.AUTO}
+ # our min version of qemu/libvirt support the q35 and virt machine
+ # types. They also support the smmuv3 and intel iommu models, so if
+ # the qemu binary is available we can report the trait.
+ if fields.Architecture.AARCH64 in dom_caps:
+ supported_models.add(fields.VIOMMUModel.SMMUV3)
+ if fields.Architecture.X86_64 in dom_caps:
+ supported_models.add(fields.VIOMMUModel.INTEL)
+ # the virtio iommu model requires a newer libvirt than our min
+ # libvirt so we need to check the version explicitly.
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
+ supported_models.add(fields.VIOMMUModel.VIRTIO)
+ return {
+ f'COMPUTE_VIOMMU_MODEL_{model.replace("-", "_").upper()}': model
+ in supported_models for model in fields.VIOMMUModel.ALL
+ }
+
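A minimal illustrative sketch (not from the patch) of the trait dict this helper produces; the detection result below is an assumption for an x86 host whose libvirt also supports the virtio model::

    supported = {'auto', 'intel', 'virtio'}   # assumed detection result
    all_models = ['auto', 'intel', 'smmuv3', 'virtio']
    traits = {
        f'COMPUTE_VIOMMU_MODEL_{m.replace("-", "_").upper()}': m in supported
        for m in all_models
    }
    # traits == {'COMPUTE_VIOMMU_MODEL_AUTO': True,
    #            'COMPUTE_VIOMMU_MODEL_INTEL': True,
    #            'COMPUTE_VIOMMU_MODEL_SMMUV3': False,
    #            'COMPUTE_VIOMMU_MODEL_VIRTIO': True}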
def _get_storage_bus_traits(self) -> ty.Dict[str, bool]:
"""Get storage bus traits based on the currently enabled virt_type.
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 7dd84973b5..c40c3c4a7f 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -254,8 +254,17 @@ class Guest(object):
"""
if cfg:
+ LOG.debug(f'looking for interface given config: {cfg}')
interfaces = self.get_all_devices(
type(cfg), from_persistent_config)
+ if not interfaces:
+ LOG.debug(f'No interface of type: {type(cfg)} found in domain')
+ return None
+ # FIXME(sean-k-mooney): we should be able to print the list of
+ # interfaces, however some tests use incomplete objects that can't
+ # be printed due to incomplete mocks or defects in the libvirt
+ # fixture. Let's address this later.
+ # LOG.debug(f'within interfaces: {list(interfaces)}')
for interface in interfaces:
# NOTE(leehom) LibvirtConfigGuest get from domain and
# LibvirtConfigGuest generated by
@@ -264,6 +273,16 @@ class Guest(object):
# equality check based on available information on nova side
if cfg == interface:
return interface
+ else:
+ # NOTE(sean-k-mooney): {list(interfaces)} could be used
+ # instead of self._domain.XMLDesc(0) once all tests have
+ # printable interfaces see the comment above ^.
+ # While the XML is more verbose it should always work
+ # for our current test suite and in production code.
+ LOG.debug(
+ f'interface for config: {cfg} '
+ f'not found in domain: {self._domain.XMLDesc(0)}'
+ )
return None
def get_vcpus_info(self):
@@ -655,6 +674,7 @@ class Guest(object):
stats = self._domain.jobStats()
return JobInfo(**stats)
except libvirt.libvirtError as ex:
+ errmsg = ex.get_error_message()
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
@@ -667,6 +687,12 @@ class Guest(object):
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
+ elif (ex.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR and
+ errmsg and "migration was active, "
+ "but no RAM info was set" in errmsg):
+ LOG.debug("Migration is active or completed but "
+ "virDomainGetJobStats is missing ram: %s", ex)
+ return JobInfo(type=libvirt.VIR_DOMAIN_JOB_NONE)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 2bc2bb337a..9658a5791d 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -66,6 +66,7 @@ from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
+import nova.virt.node # noqa
if ty.TYPE_CHECKING:
import libvirt
@@ -138,6 +139,7 @@ class Host(object):
self._caps = None
self._domain_caps = None
self._hostname = None
+ self._node_uuid = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
@@ -490,7 +492,7 @@ class Host(object):
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
- self._event_thread.setDaemon(True)
+ self._event_thread.daemon = True
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
@@ -738,6 +740,14 @@ class Host(object):
return doms
+ def get_available_cpus(self):
+ """Get the set of CPUs that exist on the host.
+
+ :returns: set of CPUs, raises libvirtError on error
+ """
+ cpus, cpu_map, online = self.get_connection().getCPUMap()
+ return {cpu for cpu in range(cpus)}
+
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
@@ -1059,6 +1069,12 @@ class Host(object):
{'old': self._hostname, 'new': hostname})
return self._hostname
+ def get_node_uuid(self):
+ """Returns the UUID of this node."""
+ if not self._node_uuid:
+ self._node_uuid = nova.virt.node.get_local_node_uuid()
+ return self._node_uuid
+
def find_secret(self, usage_type, usage_id):
"""Find a secret.
@@ -1566,7 +1582,7 @@ class Host(object):
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
- :returns: a list of virNodeDevice instance
+ :returns: a list of strings with the names of the mediated devices
"""
return self._list_devices("mdev", flags=flags)
@@ -1605,6 +1621,22 @@ class Host(object):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
+ def compare_hypervisor_cpu(self, xmlDesc, flags=0):
+ """Compares the given CPU description with the CPU provided by
+ the host hypervisor. This is different from the older method,
+ compare_cpu(), which compares a given CPU definition with the
+ host CPU without considering the abilities of the host
+ hypervisor. Except for @xmlDesc, all other parameters to the
+ compareHypervisorCPU API are optional (libvirt will choose
+ sensible defaults).
+ """
+ emulator = None
+ arch = None
+ machine = None
+ virttype = None
+ return self.get_connection().compareHypervisorCPU(
+ emulator, arch, machine, virttype, xmlDesc, flags)
+
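A hedged usage sketch (not part of the patch), assuming a `Host` instance named `host` and a guest CPU definition in `cpu_xml`; the constants are the standard libvirt CPU comparison results::

    import libvirt

    result = host.compare_hypervisor_cpu(cpu_xml)
    if result in (libvirt.VIR_CPU_COMPARE_IDENTICAL,
                  libvirt.VIR_CPU_COMPARE_SUPERSET):
        # the hypervisor can provide the requested CPU model
        pass
    elif result == libvirt.VIR_CPU_COMPARE_INCOMPATIBLE:
        # the requested CPU cannot be provided by this hypervisor
        pass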
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 617adfe030..0a64ef43dd 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -81,14 +81,24 @@ def _update_utime_ignore_eacces(path):
class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
-
- def __init__(self, path, source_type, driver_format, is_block_dev=False):
+ SUPPORTS_LUKS = False
+
+ def __init__(
+ self,
+ path,
+ source_type,
+ driver_format,
+ is_block_dev=False,
+ disk_info_mapping=None
+ ):
"""Image initialization.
:param path: libvirt's representation of the path of this disk.
:param source_type: block or file
:param driver_format: raw or qcow2
:param is_block_dev:
+ :param disk_info_mapping: disk_info['mapping'][device] metadata
+ specific to this image generated by nova.virt.libvirt.blockinfo.
"""
if (CONF.ephemeral_storage_encryption.enabled and
not self._supports_encryption()):
@@ -105,6 +115,8 @@ class Image(metaclass=abc.ABCMeta):
self.is_block_dev = is_block_dev
self.preallocate = False
+ self.disk_info_mapping = disk_info_mapping
+
# NOTE(dripton): We store lines of json (path, disk_format) in this
# file, for some image types, to prevent attacks based on changing the
# disk_format.
@@ -145,22 +157,23 @@ class Image(metaclass=abc.ABCMeta):
pass
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None,
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
- disk_bus = disk_info['bus']
+ if self.disk_info_mapping is None:
+ raise AttributeError(
+ 'Image must have disk_info_mapping to call libvirt_info()')
+ disk_bus = self.disk_info_mapping['bus']
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.driver_io = self.driver_io
@@ -522,11 +535,16 @@ class Flat(Image):
when creating a disk from a qcow2 if force_raw_images is not set in config.
"""
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
self.disk_name = disk_name
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Flat, self).__init__(path, "file", "raw", is_block_dev=False)
+ super().__init__(
+ path, "file", "raw", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -614,10 +632,15 @@ class Flat(Image):
class Qcow2(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Qcow2, self).__init__(path, "file", "qcow2", is_block_dev=False)
+ super().__init__(
+ path, "file", "qcow2", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -631,7 +654,8 @@ class Qcow2(Image):
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def create_qcow2_image(base, target, size):
- libvirt_utils.create_cow_image(base, target, size)
+ libvirt_utils.create_image(
+ target, 'qcow2', size, backing_file=base)
# Download the unmodified base image unless we already have a copy.
if not os.path.exists(base):
@@ -695,7 +719,10 @@ class Lvm(Image):
def escape(filename):
return filename.replace('_', '__')
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None,
+ disk_info_mapping=None
+ ):
self.ephemeral_key_uuid = instance.get('ephemeral_key_uuid')
if self.ephemeral_key_uuid is not None:
@@ -724,7 +751,10 @@ class Lvm(Image):
self.lv_path = os.path.join('/dev', self.vg, self.lv)
path = '/dev/mapper/' + dmcrypt.volume_name(self.lv)
- super(Lvm, self).__init__(path, "block", "raw", is_block_dev=True)
+ super(Lvm, self).__init__(
+ path, "block", "raw", is_block_dev=True,
+ disk_info_mapping=disk_info_mapping
+ )
# TODO(sbauza): Remove the config option usage and default the
# LVM logical volume creation to preallocate the full size only.
@@ -832,7 +862,9 @@ class Rbd(Image):
SUPPORTS_CLONE = True
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
if not CONF.libvirt.images_rbd_pool:
raise RuntimeError(_('You should specify'
' images_rbd_pool'
@@ -854,31 +886,32 @@ class Rbd(Image):
if self.driver.ceph_conf:
path += ':conf=' + self.driver.ceph_conf
- super(Rbd, self).__init__(path, "block", "rbd", is_block_dev=False)
+ super().__init__(
+ path, "block", "rbd", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.discard_mode = CONF.libvirt.hw_disk_discard
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
info = vconfig.LibvirtConfigGuestDisk()
- disk_bus = disk_info['bus']
+ disk_bus = self.disk_info_mapping['bus']
hosts, ports = self.driver.get_mon_addrs()
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.driver_format = 'raw'
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.source_type = 'network'
info.source_protocol = 'rbd'
info.source_name = '%s/%s' % (self.driver.pool, self.rbd_name)
@@ -1195,10 +1228,15 @@ class Rbd(Image):
class Ploop(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Ploop, self).__init__(path, "file", "ploop", is_block_dev=False)
+ super().__init__(
+ path, "file", "ploop", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.resolve_driver_format()
@@ -1301,18 +1339,25 @@ class Backend(object):
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
- def by_name(self, instance, name, image_type=None):
+ def by_name(self, instance, name, image_type=None, disk_info_mapping=None):
"""Return an Image object for a disk with the given name.
:param instance: the instance which owns this disk
:param name: The name of the disk
:param image_type: (Optional) Image type.
Default is CONF.libvirt.images_type.
+ :param disk_info_mapping: (Optional) Disk info mapping dict
:return: An Image object for the disk with given name and instance.
:rtype: Image
"""
+ # NOTE(artom) To pass functional tests, wherein the code here is loaded
+ # *before* any config with self.flags() is done, we need to have the
+ # default inline in the method, and not in the kwarg declaration.
+ image_type = image_type or CONF.libvirt.images_type
backend = self.backend(image_type)
- return backend(instance=instance, disk_name=name)
+ return backend(
+ instance=instance, disk_name=name,
+ disk_info_mapping=disk_info_mapping)
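An illustrative sketch of how a caller would use the reworked interface (variable names are assumptions, not from the patch): the per-disk mapping is handed to ``by_name()`` once, and ``libvirt_info()`` no longer takes ``disk_info``::

    mapping = disk_info['mapping']['disk']
    disk = self.image_backend.by_name(
        instance, 'disk', disk_info_mapping=mapping)
    conf = disk.libvirt_info(
        cache_mode='none', extra_specs=instance.flavor.extra_specs)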
def by_libvirt_path(self, instance, path, image_type=None):
"""Return an Image object for a disk with the given libvirt path.
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 834f242c79..adb2ec45a1 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,6 +22,7 @@ import grp
import os
import pwd
import re
+import tempfile
import typing as ty
import uuid
@@ -110,55 +111,99 @@ VTPM_DIR = '/var/lib/libvirt/swtpm/'
def create_image(
- disk_format: str, path: str, size: ty.Union[str, int],
+ path: str,
+ disk_format: str,
+ disk_size: ty.Optional[ty.Union[str, int]],
+ backing_file: ty.Optional[str] = None,
+ encryption: ty.Optional[ty.Dict[str, ty.Any]] = None
) -> None:
- """Create a disk image
-
- :param disk_format: Disk image format (as known by qemu-img)
+ """Disk image creation with qemu-img
:param path: Desired location of the disk image
- :param size: Desired size of disk image. May be given as an int or
- a string. If given as an int, it will be interpreted
- as bytes. If it's a string, it should consist of a number
- with an optional suffix ('K' for Kibibytes,
- M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
- If no suffix is given, it will be interpreted as bytes.
+ :param disk_format: Disk image format (as known by qemu-img)
+ :param disk_size: Desired size of disk image. May be given as an int or
+ a string. If given as an int, it will be interpreted as bytes. If it's
+ a string, it should consist of a number with an optional suffix ('K'
+ for Kibibytes, M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
+ If no suffix is given, it will be interpreted as bytes.
+ Can be None in the case of a COW image.
+ :param backing_file: (Optional) Backing file to use.
+ :param encryption: (Optional) Dict detailing various encryption attributes
+ such as the format and passphrase.
"""
- processutils.execute('qemu-img', 'create', '-f', disk_format, path, size)
-
+ cmd = [
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
+ ]
-def create_cow_image(
- backing_file: ty.Optional[str], path: str, size: ty.Optional[int] = None,
-) -> None:
- """Create COW image
-
- Creates a COW image with the given backing file
-
- :param backing_file: Existing image on which to base the COW image
- :param path: Desired location of the COW image
- """
- base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
- cow_opts = []
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += ['backing_file=%s' % backing_file]
- cow_opts += ['backing_fmt=%s' % base_details.file_format]
- else:
- base_details = None
- # Explicitly inherit the value of 'cluster_size' property of a qcow2
- # overlay image from its backing file. This can be useful in cases
- # when people create a base image with a non-default 'cluster_size'
- # value or cases when images were created with very old QEMU
- # versions which had a different default 'cluster_size'.
- if base_details and base_details.cluster_size is not None:
- cow_opts += ['cluster_size=%s' % base_details.cluster_size]
- if size is not None:
- cow_opts += ['size=%s' % size]
- if cow_opts:
+ cow_opts = [
+ f'backing_file={backing_file}',
+ f'backing_fmt={base_details.file_format}'
+ ]
+ # Explicitly inherit the value of 'cluster_size' property of a qcow2
+ # overlay image from its backing file. This can be useful in cases when
+ # people create a base image with a non-default 'cluster_size' value or
+ # cases when images were created with very old QEMU versions which had
+ # a different default 'cluster_size'.
+ if base_details.cluster_size is not None:
+ cow_opts += [f'cluster_size={base_details.cluster_size}']
+
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
- cow_opts = ['-o', csv_opts]
- cmd = base_cmd + cow_opts + [path]
- processutils.execute(*cmd)
+ cmd += ['-o', csv_opts]
+
+ # Disk size can be None in the case of a COW image
+ disk_size_arg = [str(disk_size)] if disk_size is not None else []
+
+ if encryption:
+ with tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8') as f:
+ # Write out the passphrase secret to a temp file
+ f.write(encryption.get('secret'))
+
+ # Ensure the secret is written to disk, we can't .close() here as
+ # that removes the file when using NamedTemporaryFile
+ f.flush()
+
+ # The basic options include the secret and encryption format
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={f.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+ # Supported luks options:
+ # cipher-alg=<str> - Name of cipher algorithm and key length
+ # cipher-mode=<str> - Name of encryption cipher mode
+ # hash-alg=<str> - Name of hash algorithm to use for PBKDF
+ # iter-time=<num> - Time to spend in PBKDF in milliseconds
+ # ivgen-alg=<str> - Name of IV generator algorithm
+ # ivgen-hash-alg=<str> - Name of IV generator hash algorithm
+ #
+ # NOTE(melwitt): Sensible defaults (that match the qemu defaults)
+ # are hardcoded at this time for simplicity and consistency when
+ # instances are migrated. Configuration of luks options could be
+ # added in a future release.
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ # We need to execute the command while the NamedTemporaryFile still
+ # exists
+ cmd += encryption_opts + [path] + disk_size_arg
+ processutils.execute(*cmd)
+ else:
+ cmd += [path] + disk_size_arg
+ processutils.execute(*cmd)
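Illustrative call forms for the consolidated helper (the paths, sizes and passphrase are made up; the keyword names follow the signature above)::

    # plain raw image of 1 GiB
    create_image('/path/to/disk.eph0', 'raw', '1G')

    # qcow2 overlay on a backing file; the size may be omitted (None)
    create_image('/path/to/disk', 'qcow2', None,
                 backing_file='/path/to/_base/cached_image')

    # LUKS-encrypted qcow2, passphrase supplied via the encryption dict
    create_image('/path/to/disk', 'qcow2', '10G',
                 encryption={'format': 'luks', 'secret': 'passphrase'})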
def create_ploop_image(
@@ -581,17 +626,31 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
def mdev_name2uuid(mdev_name: str) -> str:
- """Convert an mdev name (of the form mdev_<uuid_with_underscores>) to a
- uuid (of the form 8-4-4-4-12).
+ """Convert an mdev name (of the form mdev_<uuid_with_underscores> or
+ mdev_<uuid_with_underscores>_<pciaddress>) to a uuid
+ (of the form 8-4-4-4-12).
+
+ :param mdev_name: the name of the mdev to parse the UUID from
+ :returns: string containing the uuid
"""
- return str(uuid.UUID(mdev_name[5:].replace('_', '-')))
+ mdev_uuid = mdev_name[5:].replace('_', '-')
+ # Unconditionally remove the PCI address from the name
+ mdev_uuid = mdev_uuid[:36]
+ return str(uuid.UUID(mdev_uuid))
+
+def mdev_uuid2name(mdev_uuid: str, parent: str = None) -> str:
+ """Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
+ device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
-def mdev_uuid2name(mdev_uuid: str) -> str:
- """Convert an mdev uuid (of the form 8-4-4-4-12) to a name (of the form
- mdev_<uuid_with_underscores>).
+ :param mdev_uuid: the uuid of the mediated device
+ :param parent: the parent device id for the mediated device
+ :returns: name of the mdev to reference in libvirt
"""
- return "mdev_" + mdev_uuid.replace('-', '_')
+ name = "mdev_" + mdev_uuid.replace('-', '_')
+ if parent and parent.startswith('pci_'):
+ name = name + parent[4:]
+ return name
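An illustrative round trip using a made-up UUID (not from the patch)::

    mdev_uuid2name('b2107403-110c-45b0-af6d-8154f10c18d8')
    # -> 'mdev_b2107403_110c_45b0_af6d_8154f10c18d8'

    # A name carrying a PCI address suffix still parses back to the bare
    # UUID, because only the first 36 characters are considered.
    mdev_name2uuid('mdev_b2107403_110c_45b0_af6d_8154f10c18d8_0000_41_00_0')
    # -> 'b2107403-110c-45b0-af6d-8154f10c18d8'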
def get_flags_by_flavor_specs(flavor: 'objects.Flavor') -> ty.Set[str]:
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index b50db3aa1c..22c65e99c0 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -79,7 +79,6 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Extend the volume."""
LOG.debug("calling os-brick to extend FC Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
- LOG.debug("Extend FC Volume %s; new_size=%s",
- connection_info['data']['device_path'],
+ LOG.debug("Extend FC Volume: new_size=%s",
new_size, instance=instance)
return new_size
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 6ea91e2221..0ab3ddc4c1 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -263,12 +263,19 @@ def _get_eth_link(vif, ifc_num):
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
- 'mtu': vif['network']['meta'].get('mtu'),
+ 'mtu': _get_link_mtu(vif),
'ethernet_mac_address': vif.get('address'),
}
return link
+def _get_link_mtu(vif):
+ for subnet in vif['network']['subnets']:
+ if subnet['meta'].get('dhcp_server'):
+ return None
+ return vif['network']['meta'].get('mtu')
+
+
def _get_nets(vif, subnet, version, net_num, link_id):
"""Get networks for the given VIF and subnet
diff --git a/nova/virt/node.py b/nova/virt/node.py
new file mode 100644
index 0000000000..4cb3d0a573
--- /dev/null
+++ b/nova/virt/node.py
@@ -0,0 +1,108 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import uuid
+
+from oslo_utils import uuidutils
+
+import nova.conf
+from nova import exception
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+COMPUTE_ID_FILE = 'compute_id'
+LOCAL_NODE_UUID = None
+
+
+def write_local_node_uuid(node_uuid):
+ # We only ever write an identity file in the CONF.state_path
+ # location
+ fn = os.path.join(CONF.state_path, COMPUTE_ID_FILE)
+
+ # Try to create the identity file and write our uuid into it. Fail
+ # if the file exists (since it shouldn't if we made it here).
+ try:
+ with open(fn, 'x') as f:
+ f.write(node_uuid)
+ except FileExistsError:
+ # If the file exists, we must either fail or re-survey all the
+ # potential files. If we just read and return it, it could be
+ # inconsistent with files in the other locations.
+ raise exception.InvalidNodeConfiguration(
+ reason='Identity file %s appeared unexpectedly' % fn)
+ except Exception as e:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to write uuid to %s: %s' % (fn, e))
+
+ LOG.info('Wrote node identity %s to %s', node_uuid, fn)
+
+
+def read_local_node_uuid():
+ locations = ([os.path.dirname(f) for f in CONF.config_file] +
+ [CONF.state_path])
+
+ uuids = []
+ found = []
+ for location in locations:
+ fn = os.path.join(location, COMPUTE_ID_FILE)
+ try:
+ # UUIDs should be 36 characters in canonical format. Read
+ # a little more to be graceful about whitespace in/around
+ # the actual value we want to read. However, it must parse
+ # to a legit UUID once we strip the whitespace.
+ with open(fn) as f:
+ content = f.read(40)
+ node_uuid = str(uuid.UUID(content.strip()))
+ except FileNotFoundError:
+ continue
+ except ValueError:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to parse UUID from %s' % fn)
+ uuids.append(node_uuid)
+ found.append(fn)
+
+ if uuids:
+ # Any identities we found must be consistent, or we fail
+ first = uuids[0]
+ for i, (node_uuid, fn) in enumerate(zip(uuids, found)):
+ if node_uuid != first:
+ raise exception.InvalidNodeConfiguration(
+ reason='UUID %s in %s does not match %s' % (
+ node_uuid, fn, uuids[i - 1]))
+ LOG.info('Determined node identity %s from %s', first, found[0])
+ return first
+ else:
+ return None
+
+
+def get_local_node_uuid():
+ """Read or create local node uuid file.
+
+ :returns: UUID string read from file, or generated
+ """
+ global LOCAL_NODE_UUID
+
+ if LOCAL_NODE_UUID is not None:
+ return LOCAL_NODE_UUID
+
+ node_uuid = read_local_node_uuid()
+ if not node_uuid:
+ node_uuid = uuidutils.generate_uuid()
+ LOG.info('Generated node identity %s', node_uuid)
+ write_local_node_uuid(node_uuid)
+
+ LOCAL_NODE_UUID = node_uuid
+ return node_uuid
diff --git a/playbooks/ceph/glance-copy-policy.yaml b/playbooks/ceph/glance-copy-policy.yaml
deleted file mode 100644
index 41654a103d..0000000000
--- a/playbooks/ceph/glance-copy-policy.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-- hosts: controller
- tasks:
- - name: create local.sh
- become: yes
- blockinfile:
- path: /opt/stack/devstack/local.sh
- create: True
- mode: 0777
- block: |
- # This policy is default to admin only in glance. Override
- # here to allow everyone and every type of image (private
- # or public) to copy. This way we will be able to test copy
- # image via non-admin as well as on private images.
- echo $'"copy_image": ""' >> /etc/glance/policy.yaml
- sudo systemctl restart 'devstack@g-*'
diff --git a/playbooks/ceph/glance-setup.yaml b/playbooks/ceph/glance-setup.yaml
new file mode 100644
index 0000000000..5792c72237
--- /dev/null
+++ b/playbooks/ceph/glance-setup.yaml
@@ -0,0 +1,39 @@
+- hosts: controller
+ tasks:
+ - name: create local.sh
+ become: yes
+ blockinfile:
+ path: /opt/stack/devstack/local.sh
+ create: True
+ mode: 0777
+ block: |
+ # Delete all existing images
+ source /opt/stack/devstack/openrc admin
+ for img in $(openstack image list -f value -c ID); do
+ openstack image show $img
+ echo Deleting $img
+ openstack image delete $img
+ done
+
+ # Inflate our cirros image to 1G raw
+ arch=$(uname -m)
+ image=$(ls /opt/stack/devstack/files/cirros*${arch}-disk.img | tail -n1)
+ rawimage="/opt/stack/devstack/files/cirros-raw.img"
+ qemu-img convert -O raw "$image" "$rawimage"
+ truncate --size $((950 << 20)) "$rawimage"
+
+ # Upload it to glance as the sole image available so tempest
+ # config will find it. Wait ten seconds after doing this
+ # before the restart below.
+ openstack image create --container-format bare --disk-format raw --public "cirros-raw" < "$rawimage"
+ sleep 10
+ openstack image list
+ openstack image show cirros-raw
+
+ # This policy is default to admin only in glance. Override
+ # here to allow everyone and every type of image (private
+ # or public) to copy. This way we will be able to test copy
+ # image via non-admin as well as on private images.
+ echo $'"copy_image": ""' >> /etc/glance/policy.yaml
+ sudo systemctl restart 'devstack@g-*'
+
diff --git a/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml b/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml
new file mode 100644
index 0000000000..b5232f5ea2
--- /dev/null
+++ b/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - |
+ For networks which have any subnets with DHCP enabled, the MTU value is no
+ longer sent in the metadata. In such cases the MTU is configured through
+ the DHCP server.
diff --git a/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
new file mode 100644
index 0000000000..b370889171
--- /dev/null
+++ b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ The following SPICE-related options are added to the ``spice``
+ configuration group of a Nova configuration:
+
+ - ``image_compression``
+ - ``jpeg_compression``
+ - ``zlib_compression``
+ - ``playback_compression``
+ - ``streaming_mode``
+
+ These configuration options can be used to enable and set the
+ SPICE compression settings for libvirt (QEMU/KVM) provisioned
+ instances. Each configuration option is optional and can be set
+ explicitly to configure the associated SPICE compression setting
+ for libvirt. If none of these options is set, then none
+ of the SPICE compression settings will be configured for libvirt,
+ which corresponds to the behavior before this change. In this case,
+ the built-in defaults from the libvirt backend (e.g. QEMU) are used.
+
+ Note that those options are only taken into account if SPICE support
+ is enabled (and the VNC support is disabled).
diff --git a/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml b/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml
new file mode 100644
index 0000000000..47c6b38265
--- /dev/null
+++ b/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Added support for rebuilding a volume-backed instance with a different
+ image. This is achieved by reimaging the boot volume, i.e. writing the new
+ image onto the boot volume on the Cinder side.
+ Previously, rebuilding volume-backed instances was only possible with the
+ same image; this feature allows rebuilding volume-backed instances
+ with a different image than the existing one in the boot volume.
+ This is supported starting from API microversion 2.93.
diff --git a/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
new file mode 100644
index 0000000000..6c5bc98046
--- /dev/null
+++ b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Starting with v2.95, any evacuated instance will be stopped at the
+ destination. The required minimum version for Nova computes is
+ 27.0.0 (antelope 2023.1). Operators can still keep the
+ previous behavior by selecting a microversion below v2.95.
+upgrade:
+ - |
+ Operators will have to consider upgrading compute hosts to Nova
+ 27.0.0 (antelope 2023.1) in order to take advantage of the new
+ (microversion v2.95) evacuate API behavior. An exception will be
+ raised for older versions.
diff --git a/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
new file mode 100644
index 0000000000..95422fce67
--- /dev/null
+++ b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ It is now possible to configure nova-compute services using the libvirt
+ driver by setting ``[libvirt]cpu_power_management`` to ``True`` in order to
+ let the service power physical CPUs down or up depending on whether those
+ CPUs are pinned to instances. In order to support this feature, the
+ compute service needs to be set with ``[compute]cpu_dedicated_set``. If so,
+ all the related CPUs will be powered down until they are used by an
+ instance, and the related pinned CPUs will be powered up just before
+ starting the guest. If ``[compute]cpu_dedicated_set`` isn't set, then the
+ compute service will refuse to start.
+ By default the power strategy offlines CPUs when powering down and
+ onlines them when powering up, but another strategy is possible by using
+ ``[libvirt]cpu_power_management_strategy=governor``, which instead modifies
+ the related CPU governor using the ``[libvirt]cpu_power_governor_low`` and
+ ``[libvirt]cpu_power_governor_high`` configuration values (respective
+ defaults being ``powersave`` and ``performance``).
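A minimal configuration sketch using only the option names mentioned above (the CPU range is an assumption)::

    [compute]
    cpu_dedicated_set = 2-17

    [libvirt]
    cpu_power_management = True
    cpu_power_management_strategy = governor
    cpu_power_governor_low = powersave
    cpu_power_governor_high = performance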
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml
new file mode 100644
index 0000000000..6d30f7c398
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ Nova started tracking PCI devices in Placement. This is an optional feature
+ disabled by default while we are implementing inventory tracking and
+ scheduling support for both PCI passthrough devices and SR-IOV devices
+ consumed via Neutron ports. Please read our
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
new file mode 100644
index 0000000000..7a9e53ed26
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Since 26.0.0 (Zed) Nova supports tracking PCI devices in Placement. Now
+ Nova also supports scheduling flavor-based PCI device requests via
+ Placement. This support is disabled by default. Please read the
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
diff --git a/releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml b/releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml
new file mode 100644
index 0000000000..3f42f70908
--- /dev/null
+++ b/releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ The algorithm that is used to check whether a multi-NUMA guest fits on
+ a multi-NUMA host has been optimized to speed up the decision
+ on hosts with a high number of NUMA nodes (> 8). For details see
+ `bug 1978372`_.
+
+ .. _bug 1978372: https://bugs.launchpad.net/nova/+bug/1978372
diff --git a/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml b/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml
new file mode 100644
index 0000000000..a5a3b7c8c2
--- /dev/null
+++ b/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ `Bug #1981813 <https://bugs.launchpad.net/nova/+bug/1981813>`_: Now nova
+ detects if the ``vnic_type`` of a bound port has been changed in neutron
+ and leaves an ERROR message in the compute service log, as such a change on
+ a bound port is not supported. Also, the nova-compute service no longer
+ crashes on restart after such a port change; Nova will log an ERROR and
+ skip the initialization of the instance with such a port during startup.
diff --git a/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml b/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml
new file mode 100644
index 0000000000..943aa99a43
--- /dev/null
+++ b/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml
@@ -0,0 +1,11 @@
+---
+other:
+ - |
+ A workaround has been added to the libvirt driver to catch and pass
+ migrations that were previously failing with the error:
+
+ ``libvirt.libvirtError: internal error: migration was active, but no RAM info was set``
+
+ See `bug 1982284`_ for more details.
+
+ .. _bug 1982284: https://bugs.launchpad.net/nova/+bug/1982284
diff --git a/releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml b/releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml
new file mode 100644
index 0000000000..89edd12b3d
--- /dev/null
+++ b/releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #1941005 <https://bugs.launchpad.net/nova/+bug/1941005>`_ is fixed.
+ During resize Nova now uses the PCI requests from the new flavor to select
+ the destination host.
diff --git a/releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml b/releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml
new file mode 100644
index 0000000000..7200290780
--- /dev/null
+++ b/releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ `Bug #1986838 <https://bugs.launchpad.net/nova/+bug/1986838>`_: Nova now
+ correctly schedules an instance that requests multiple PCI devices via
+ multiple PCI aliases in the flavor extra_spec when multiple similar devices
+ are requested but the compute host has only one such device matching with
+ each request individually.
diff --git a/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
new file mode 100644
index 0000000000..0941dd7450
--- /dev/null
+++ b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
@@ -0,0 +1,28 @@
+---
+fixes:
+ - |
+ Fixes `bug 1996995`_ in which VMs live migrated on certain VXLAN Arista
+ network fabrics were inaccessible until the switch arp cache expired.
+
+ A Nova workaround option of ``enable_qemu_monitor_announce_self`` was added
+ to fix `bug 1815989`_ which when enabled would interact with the QEMU
+ monitor and force a VM to announce itself.
+
+ On certain network fabrics, VMs that are live migrated remain inaccessible
+ via the network despite the QEMU monitor announce_self command successfully
+ being called.
+
+ It was noted that on Arista VXLAN fabrics, testing showed that it required
+ several attempts of running the QEMU announce_self monitor command before
+ the switch would acknowledge a VM's new location on the fabric.
+
+ This fix introduces two operator configurable options.
+ The first option sets the number of times the QEMU monitor announce_self
+ command is called: ``[workarounds]qemu_monitor_announce_self_count``.
+
+ The second option allows operators to set the delay in seconds between
+ subsequent announce_self commands:
+ ``[workarounds]qemu_monitor_announce_self_interval``.
+
+ .. _`bug 1996995`: https://bugs.launchpad.net/nova/+bug/1996995
+ .. _`bug 1815989`: https://bugs.launchpad.net/nova/+bug/1815989
diff --git a/releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml b/releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml
new file mode 100644
index 0000000000..aec87dc887
--- /dev/null
+++ b/releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ During the triage of https://bugs.launchpad.net/nova/+bug/1978372
+ we compared the performance of nova's NUMA allocation strategies
+ as applied to large numbers of host and guest NUMA nodes.
+ Prior to ``Xena`` nova only supported a linear packing strategy.
+ In ``Xena`` ``[compute]/packing_host_numa_cells_allocation_strategy``
+ was introduced, maintaining the previous packing behavior by default.
+ The NUMA allocation strategy now defaults to spread.
+ The old behavior can be restored by defining:
+ ``[compute]/packing_host_numa_cells_allocation_strategy=true``
diff --git a/releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml b/releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml
new file mode 100644
index 0000000000..a0c707def4
--- /dev/null
+++ b/releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The [pci]passthrough_whitelist config option is renamed to
+ [pci]device_spec. The old name is deprecated and aliased to the new one.
+ The old name will be removed in a future release.
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
new file mode 100644
index 0000000000..72a6f861b6
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The Nova service enables the new API policy (RBAC) defaults and scope
+ checks by default. The default values of the config options
+ ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults``
+ have been changed to ``True``.
+
+ This means that if you are using a system scope token to access the Nova
+ API, the request will fail with a 403 error code. Also, new defaults will
+ be enforced by default. To learn about the new defaults of each policy
+ rule, refer to the `Policy New Defaults`_. For more detail about the Nova
+ API policy changes, refer to `Policy Concepts`_.
+
+ If you want to disable them then modify the below config options value in
+ ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
new file mode 100644
index 0000000000..4fd2cc1ca9
--- /dev/null
+++ b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Fixed an issue where placement returned ironic nodes that had just started
+ automatic cleaning as valid candidates. This is done by marking all ironic
+ nodes with an instance on them as reserved, such that nova only makes them
+ available once we have double checked that Ironic reports the node as
+ available. If you don't have automatic cleaning on, this might mean it
+ takes longer than normal for Ironic nodes to become available for new instances.
+ If you want the old behaviour use the following workaround config:
+ `[workarounds]skip_reserve_in_use_ironic_nodes=true`
diff --git a/releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml b/releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml
new file mode 100644
index 0000000000..314c2c0ffe
--- /dev/null
+++ b/releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - |
+ The Libvirt driver can now add a virtual IOMMU device
+ to all created guests, when running on an x86 host and using the Q35
+ machine type or on AArch64.
+
+ To enable this, provide the `hw:viommu_model` flavor extra
+ spec or the equivalent image metadata property `hw_viommu_model`; if the
+ guest CPU architecture and OS allow it, the libvirt driver will enable the
+ vIOMMU. Supported values are intel|smmuv3|virtio|auto, with ``auto`` being
+ the default. With ``auto``, ``virtio`` is selected automatically if libvirt
+ supports it, else ``intel`` on X86 (Q35) and ``smmuv3`` on AArch64.
+ The vIOMMU config will raise an invalid exception if the guest architecture
+ is neither X86 (Q35) nor AArch64.
+
+ Note that enabling a vIOMMU might introduce significant performance
+ overhead. You can see a performance comparison table in the
+ `AMD vIOMMU session on KVM Forum 2021`_.
+ For the above reason, a vIOMMU should only be enabled for workloads that
+ require it.
+
+ .. _`AMD vIOMMU session on KVM Forum 2021`: https://static.sched.com/hosted_files/kvmforum2021/da/vIOMMU%20KVM%20Forum%202021%20-%20v4.pdf
diff --git a/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml b/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml
new file mode 100644
index 0000000000..46ebf0bd2d
--- /dev/null
+++ b/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ If the compute service is down on the source node and the user tries to stop
+ an instance, the instance gets stuck at powering-off, and evacuation fails
+ with the message: Cannot 'evacuate' instance <instance-id> while it is in
+ task_state powering-off.
+ It is now possible for evacuation to ignore the VM task state.
+ For more details see: `bug 1978983`_
+
+ .. _`bug 1978983`: https://bugs.launchpad.net/nova/+bug/1978983 \ No newline at end of file
diff --git a/releasenotes/notes/microversion-2-94-59649401d5763286.yaml b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
new file mode 100644
index 0000000000..d0927e6f75
--- /dev/null
+++ b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - |
+ The 2.94 microversion has been added. This microversion extends
+ microversion 2.90 by allowing Fully Qualified Domain Names (FQDN) wherever
+ the ``hostname`` is able to be specified. This consists of creating an
+ instance (``POST /servers``), updating an instance
+ (``PUT /servers/{id}``), or rebuilding an instance
+ (``POST /servers/{server_id}/action (rebuild)``). When using an FQDN as the
+ instance hostname, the ``[api]dhcp_domain`` configuration option must be
+ set to the empty string in order for the correct FQDN to appear in the
+ ``hostname`` field in the metadata API.
+
+upgrade:
+ - |
+ In order to make use of microversion's 2.94 FQDN hostnames, the
+ ``[api]dhcp_domain`` config option must be set to the empty string. If
+ this is not done, the ``hostname`` field in the metadata API will be
+ incorrect, as it will include the value of ``[api]dhcp_domain`` appended to
+ the instance's FQDN. Note that simply not setting ``[api]dhcp_domain`` is
+ not enough, as it has a default value of ``novalocal``. It must explicitly
+ be set to the empty string.
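A hedged sketch of a server create request body using an FQDN hostname (Python dict form; the image and flavor references are placeholders)::

    body = {
        "server": {
            "name": "db-server",
            "hostname": "db01.example.com",   # FQDN allowed with >= 2.94
            "imageRef": "<image-uuid>",
            "flavorRef": "<flavor-id>",
            "networks": "auto",
        }
    }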
diff --git a/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml b/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml
new file mode 100644
index 0000000000..f4361477de
--- /dev/null
+++ b/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ Apache mod_wsgi does not support passing command line arguments to the wsgi
+ application that it hosts. As a result, when the nova api or metadata api
+ were run under mod_wsgi it was not possible to use multiple config files
+ or non-default file names, i.e. nova-api.conf.
+ This has been addressed by the introduction of a new, optional, environment
+ variable ``OS_NOVA_CONFIG_FILES``. ``OS_NOVA_CONFIG_FILES`` is a ``;``
+ separated list of file paths relative to ``OS_NOVA_CONFIG_DIR``.
+ When unset, the default ``api-paste.ini`` and ``nova.conf`` will be used
+ from ``/etc/nova``. This is supported for the nova api and nova metadata
+ wsgi applications.
+
diff --git a/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml b/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml
new file mode 100644
index 0000000000..72d6e763aa
--- /dev/null
+++ b/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Add a new ``hw:locked_memory`` extra spec and ``hw_locked_memory`` image
+ property to lock memory on a libvirt guest. Locking memory marks the guest
+ memory allocations as unmovable and unswappable.
+ The ``hw:locked_memory`` extra spec and ``hw_locked_memory`` image property
+ accept boolean values in string format, like 'Yes' or 'false'.
+ A `LockMemoryForbidden` exception is raised if you set the lock memory value
+ but do not also set either the flavor extra spec
+ ``hw:mem_page_size`` or the image property ``hw_mem_page_size``,
+ so we can ensure that the scheduler can actually account for this correctly
+ and prevent out of memory events.
diff --git a/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml b/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml
new file mode 100644
index 0000000000..171b07d025
--- /dev/null
+++ b/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml
@@ -0,0 +1,36 @@
+---
+features:
+ - |
+ The Nova policies have been modified to drop the system scope. Every
+ API policy is scoped to the project. This means that system scoped users
+ will get a 403 permission denied error.
+
+ Also, the project reader role is ready to use. Users with the reader role
+ can only perform read-only operations within their project. This
+ role can be used for audit purposes.
+
+ Currently, nova supports the following roles:
+
+ * ``admin`` (Legacy admin)
+ * ``project member``
+ * ``project reader``
+
+ For the details on what changed from the existing policy, please refer
+ to the `RBAC new guidelines`_. We have implemented only phase-1 of the
+ `RBAC new guidelines`_.
+ Currently, scope checks and new defaults are disabled by default. You can
+ enable them by switching the below config option in ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=True
+ enforce_scope=True
+
+ We recommend enabling both scope checks and the new defaults together,
+ otherwise you may experience late failures with unclear error
+ messages.
+
+ Please refer to `Policy New Defaults`_ for details about the new policy
+ defaults and the migration plan.
+
+ .. _`RBAC new guidelines`: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
new file mode 100644
index 0000000000..7e80059b80
--- /dev/null
+++ b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix rescuing volume based instance by adding a check for 'hw_rescue_disk'
+ and 'hw_rescue_device' properties in image metadata before attempting
+ to rescue instance.
diff --git a/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
new file mode 100644
index 0000000000..fdeb593bd2
--- /dev/null
+++ b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ The compute manager now uses a local file to provide node uuid persistence
+ to guard against problems with renamed services, among other things.
+ Deployers wishing to ensure that *new* compute services get a predictable
+ uuid before initial startup may provision that file and nova will use it,
+ otherwise nova will generate and write one to a `compute_id` file in
+ `CONF.state_path` the first time it starts up. Accidental renames of a
+ compute node's hostname will be detected and the manager will exit to avoid
+ database corruption. Note that none of this applies to Ironic computes, as
+ they manage nodes and uuids differently.
+upgrade:
+ - |
+ Existing compute nodes will, upon upgrade, persist the uuid of the compute
+ node assigned to their hostname at first startup. Since this must match
+ what is currently in the database, it is important to let nova provision
+ this file from its database. Nova will only persist to a `compute_id` file
+ in the `CONF.state_path` directory, which should already be writable.
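A hedged sketch of pre-provisioning the identity file before first startup (the state path is an assumption; the file name comes from the note above)::

    import os
    import uuid

    state_path = '/var/lib/nova'              # assumed [DEFAULT]state_path
    with open(os.path.join(state_path, 'compute_id'), 'x') as f:
        f.write(str(uuid.uuid4()))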
diff --git a/releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml b/releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml
new file mode 100644
index 0000000000..937c8d1c8a
--- /dev/null
+++ b/releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml
@@ -0,0 +1,21 @@
+---
+upgrade:
+ - |
+ In this release the default values for the initial ram and cpu allocation
+ ratios have been updated to 1.0 and 4.0 respectively. This will not
+ affect any existing compute node resource providers but the new default
+ will take effect on the creation of new resource providers.
+other:
+ - |
+ The default initial allocation ratios enabled ram over commit by default
+ with a factor of ``1.5``. This value was chosen early in nova's history
+ as the predominant workload was web hosting or other lightweight
+ virtualization. Similarly the default initial cpu allocation ratio
+ defaulted to 16. As more demanding workloads from telco, enterprise,
+ scientific and governmental users became the norm, the initial values we
+ had chosen became less and less correct over time. These have now been
+ updated to reflect a more reasonable default for the majority of our users.
+ As of this release the initial ram allocation value is 1.0 disabling
+ overcommit by default for new compute nodes and the initial cpu allocation
+ ratio is now 4.0 which is a more reasonable overcommit for non idle
+ workloads.
diff --git a/releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml b/releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml
new file mode 100644
index 0000000000..c262be1527
--- /dev/null
+++ b/releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - |
+ The following enlightenments are now added by default to the libvirt XML for Windows guests:
+
+ * vpindex
+ * runtime
+ * synic
+ * reset
+ * frequencies
+ * reenlightenment
+ * tlbflush
+ * ipi
+ * evmc
+
+ This adds to the list of already existing enlightenments, namely:
+
+ * relaxed
+ * vapic
+ * spinlocks retries
+ * vendor_id spoofing
diff --git a/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
new file mode 100644
index 0000000000..924e09a602
--- /dev/null
+++ b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Nova's use of libvirt's compareCPU() API has become error-prone as
+ it doesn't take into account the host hypervisor's capabilities. With
+ QEMU >=2.9 and libvirt >= 4.4.0, libvirt will do the right thing in
+ terms of CPU comparison checks via a new replacement API,
+ compareHypervisorCPU(). Nova satisfies the said minimum version
+ requirements of QEMU and libvirt by a good margin.
+
+ This change replaces the usage of older API, compareCPU(), with the
+ new one, compareHypervisorCPU().
diff --git a/nova/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml b/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml
index d6b6e45968..d6b6e45968 100644
--- a/nova/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml
+++ b/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml
diff --git a/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml b/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml
new file mode 100644
index 0000000000..2580f73d35
--- /dev/null
+++ b/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ When vDPA was first introduced, move operations were implemented in the code
+ but not tested either in a real environment or in functional tests. Due to
+ this gap nova elected to block move operations for instances with vDPA
+ devices. All move operations except for live migration have now been tested
+ and found to indeed work, so the API blocks have been removed and
+ functional tests introduced. Other operations such as suspend and
+ live migration require code changes to support and will be enabled as new
+ features in the future.
diff --git a/releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml b/releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml
new file mode 100644
index 0000000000..45092b5a00
--- /dev/null
+++ b/releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml
@@ -0,0 +1,25 @@
+---
+features:
+ - |
+ vDPA support was first introduced in the 23.0.0 (Wallaby) release with
+ limited instance lifecycle operations. Nova now supports all instance
+ lifecycle operations, including suspend, attach/detach and hot-plug live
+ migration.
+
+ QEMU and the Linux kernel do not currently support transparent live
+ migration of vDPA devices. Hot-plug live migration unplugs the vDPA
+ device on the source host before the VM is live migrated and
+ automatically hot-plugs the device on the destination after the
+ migration. While this can lead to packet loss, it enables live migration
+ to be used when needed until transparent live migration can be added in a
+ future release.
+
+ vDPA hot-plug live migration requires all compute services to be upgraded
+ to service level 63 before it is enabled. Similarly, suspend/resume needs
+ service level 63 and attach/detach requires service level 62.
+ As such, these features will not be available during a rolling upgrade but
+ will become available once all hosts are upgraded to the 26.0.0 (Zed)
+ release.
+
+ With the addition of these features, all instance lifecycle operations are
+ now valid for VMs with vDPA neutron ports.
+
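Purely as a conceptual sketch of the hot-plug flow described above (every
callable below is an illustrative stand-in, not a nova or libvirt API)::

    # Conceptual ordering of hot-plug live migration for vDPA ports.
    def hotplug_live_migrate(instance, vdpa_ports, detach, migrate, attach):
        for port in vdpa_ports:
            detach(instance, port)   # unplug on the source: brief packet loss
        migrate(instance)            # guest migrates without the vDPA device
        for port in vdpa_ports:
            attach(instance, port)   # re-plug on the destination host

    hotplug_live_migrate(
        "instance-0001", ["vdpa-port-a"],
        detach=lambda i, p: print(f"detach {p} from {i} on the source"),
        migrate=lambda i: print(f"live migrate {i}"),
        attach=lambda i, p: print(f"attach {p} to {i} on the destination"),
    )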
diff --git a/releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml b/releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml
new file mode 100644
index 0000000000..71fb1fc1f0
--- /dev/null
+++ b/releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml
@@ -0,0 +1,46 @@
+---
+prelude: |
+ The 26.0.0 release includes many new features and bug fixes. Please be
+ sure to read the upgrade section which describes the required actions to
+ upgrade your cloud from 25.0.0 (Yoga) to 26.0.0 (Zed).
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for Zed is `v2.93`__.
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-zed
+
+ - `Virtual IOMMU devices`__ can now be created and attached to an instance
+ when running on an x86 host and using the libvirt driver.
+
+ .. __: https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#virtual-iommu-support
+
+ - Improved behavior for Windows guests by adding the following
+ `Hyper-V enlightenments`__ by default to all Windows guests on the libvirt
+ driver: `vpindex`, `runtime`, `synic`, `reset`, `frequencies`,
+ `reenlightenment`, `tlbflush`, `ipi` and `evmcs`.
+
+ .. __: https://libvirt.org/formatdomain.html#hypervisor-features
+
+ - All lifecycle actions are now fully supported for
+ `instances with vDPA ports`__, including vDPA hot-plug live migration,
+ suspend and attach/detach.
+
+ .. __: https://docs.openstack.org/nova/latest/admin/vdpa.html
+
+ - Volume-backed instances (instances whose root disk is attached as a
+ volume) can now be rebuilt by specifying microversion 2.93; previously
+ such requests returned an HTTP 400 error (see the request sketch after
+ this note).
+
+ - The `unshelve` instance API action now provides a new `host` parameter
+ with the 2.91 microversion (admin only).
+
+ - With microversion 2.92, you can only import a public key and not generate
+ a keypair. You can also use an extended name pattern.
+
+ - The default system scope is removed from all APIs, thereby completing
+ `phase #1 of the new RBAC guidelines`__, which are opt-in.
+
+ .. __: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1
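As a hedged illustration of the volume-backed rebuild item above (the endpoint,
token and UUIDs are placeholders and this is not an official client example)::

    # Sketch: rebuild a (possibly volume-backed) server while opting in to
    # compute API microversion 2.93. All identifiers below are placeholders.
    import requests

    COMPUTE = "https://cloud.example.com/compute/v2.1"   # placeholder endpoint
    TOKEN = "placeholder-keystone-token"
    SERVER = "11111111-2222-3333-4444-555555555555"      # placeholder server UUID
    IMAGE = "66666666-7777-8888-9999-000000000000"       # placeholder image UUID

    resp = requests.post(
        f"{COMPUTE}/servers/{SERVER}/action",
        headers={
            "X-Auth-Token": TOKEN,
            # Requesting 2.93 is what allows rebuilding volume-backed servers.
            "OpenStack-API-Version": "compute 2.93",
        },
        json={"rebuild": {"imageRef": IMAGE}},
    )
    resp.raise_for_status()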
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 2f4bfafb61..6bff00e25a 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@ Nova Release Notes
:maxdepth: 1
unreleased
+ zed
yoga
xena
wallaby
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
new file mode 100644
index 0000000000..d90391af7c
--- /dev/null
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -0,0 +1,694 @@
+# Andi Chandler <andi@gowling.com>, 2017. #zanata
+# Andi Chandler <andi@gowling.com>, 2018. #zanata
+# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2022-09-16 12:59+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2022-09-15 09:04+0000\n"
+"Last-Translator: Andi Chandler <andi@gowling.com>\n"
+"Language-Team: English (United Kingdom)\n"
+"Language: en_GB\n"
+"X-Generator: Zanata 4.3.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+
+msgid "'3840x2160' is only available on Windows / Hyper-V Server 2016."
+msgstr "'3840x2160' is only available on Windows / Hyper-V Server 2016."
+
+msgid ""
+"'nova-manage db sync' can now sync the cell0 database. The cell0 db is "
+"required to store instances that cannot be scheduled to any cell. Before the "
+"'db sync' command is called a cell mapping for cell0 must have been created "
+"using 'nova-manage cell_v2 map_cell0'. This command only needs to be called "
+"when upgrading to CellsV2."
+msgstr ""
+"'nova-manage db sync' can now sync the cell0 database. The cell0 db is "
+"required to store instances that cannot be scheduled to any cell. Before the "
+"'db sync' command is called a cell mapping for cell0 must have been created "
+"using 'nova-manage cell_v2 map_cell0'. This command only needs to be called "
+"when upgrading to CellsV2."
+
+msgid ""
+"(Only if you do continuous deployment) "
+"1337890ace918fa2555046c01c8624be014ce2d8 drops support for an instance major "
+"version, which means that you must have deployed at least commit "
+"713d8cb0777afb9fe4f665b9a40cac894b04aacb before deploying this one."
+msgstr ""
+"(Only if you do continuous deployment) "
+"1337890ace918fa2555046c01c8624be014ce2d8 drops support for an instance major "
+"version, which means that you must have deployed at least commit "
+"713d8cb0777afb9fe4f665b9a40cac894b04aacb before deploying this one."
+
+msgid "**Filtering**"
+msgstr "**Filtering**"
+
+msgid "**New Defaults(Admin, Member and Reader)**"
+msgstr "**New Defaults(Admin, Member and Reader)**"
+
+msgid "**Other**"
+msgstr "**Other**"
+
+msgid "**Policies granularity**"
+msgstr "**Policies granularity**"
+
+msgid "**Ports**"
+msgstr "**Ports**"
+
+msgid "**Scope**"
+msgstr "**Scope**"
+
+msgid "**Sorting**"
+msgstr "**Sorting**"
+
+msgid "**Volumes**"
+msgstr "**Volumes**"
+
+msgid "**os:monitors**. Guest VM number of monitors. Acceptable values::"
+msgstr "**os:monitors**. Guest VM number of monitors. Acceptable values::"
+
+msgid "**os:resolution**. Guest VM screen resolution size. Acceptable values::"
+msgstr ""
+"**os:resolution**. Guest VM screen resolution size. Acceptable values::"
+
+msgid ""
+"**os:vram**. Guest VM VRAM amount. Only available on Windows / Hyper-V "
+"Server 2016. Acceptable values::"
+msgstr ""
+"**os:vram**. Guest VM VRAM amount. Only available on Windows / Hyper-V "
+"Server 2016. Acceptable values::"
+
+msgid "/etc/nova/placement-policy.yaml"
+msgstr "/etc/nova/placement-policy.yaml"
+
+msgid "/etc/nova/policy.yaml"
+msgstr "/etc/nova/policy.yaml"
+
+msgid "/etc/placement/policy.yaml"
+msgstr "/etc/placement/policy.yaml"
+
+msgid "/images"
+msgstr "/images"
+
+msgid "/os-baremetal-nodes"
+msgstr "/os-baremetal-nodes"
+
+msgid "/os-fixed-ips"
+msgstr "/os-fixed-ips"
+
+msgid "/os-floating-ip-dns"
+msgstr "/os-floating-ip-dns"
+
+msgid "/os-floating-ip-pools"
+msgstr "/os-floating-ip-pools"
+
+msgid "/os-floating-ips"
+msgstr "/os-floating-ips"
+
+msgid "/os-floating-ips-bulk"
+msgstr "/os-floating-ips-bulk"
+
+msgid "/os-fping"
+msgstr "/os-fping"
+
+msgid "/os-networks"
+msgstr "/os-networks"
+
+msgid "/os-security-group-default-rules"
+msgstr "/os-security-group-default-rules"
+
+msgid "/os-security-group-rules"
+msgstr "/os-security-group-rules"
+
+msgid "/os-security-groups"
+msgstr "/os-security-groups"
+
+msgid "/os-snapshots"
+msgstr "/os-snapshots"
+
+msgid "/os-volumes"
+msgstr "/os-volumes"
+
+msgid "12.0.1"
+msgstr "12.0.1"
+
+msgid "12.0.3"
+msgstr "12.0.3"
+
+msgid "12.0.4"
+msgstr "12.0.4"
+
+msgid "12.0.5"
+msgstr "12.0.5"
+
+msgid "13.0.0"
+msgstr "13.0.0"
+
+msgid "13.1.0"
+msgstr "13.1.0"
+
+msgid "13.1.1"
+msgstr "13.1.1"
+
+msgid "13.1.2"
+msgstr "13.1.2"
+
+msgid "13.1.3"
+msgstr "13.1.3"
+
+msgid "13.1.4"
+msgstr "13.1.4"
+
+msgid "14.0.0"
+msgstr "14.0.0"
+
+msgid "14.0.1"
+msgstr "14.0.1"
+
+msgid "14.0.10"
+msgstr "14.0.10"
+
+msgid "14.0.2"
+msgstr "14.0.2"
+
+msgid "14.0.4"
+msgstr "14.0.4"
+
+msgid "14.0.5"
+msgstr "14.0.5"
+
+msgid "14.0.7"
+msgstr "14.0.7"
+
+msgid "14.1.0"
+msgstr "14.1.0"
+
+msgid "15.0.0"
+msgstr "15.0.0"
+
+msgid "15.0.1"
+msgstr "15.0.1"
+
+msgid "15.0.2"
+msgstr "15.0.2"
+
+msgid "15.0.5"
+msgstr "15.0.5"
+
+msgid "15.0.7"
+msgstr "15.0.7"
+
+msgid "15.0.8"
+msgstr "15.0.8"
+
+msgid "15.1.0"
+msgstr "15.1.0"
+
+msgid "15.1.1"
+msgstr "15.1.1"
+
+msgid "15.1.3"
+msgstr "15.1.3"
+
+msgid "15.1.4"
+msgstr "15.1.4"
+
+msgid "15.1.5"
+msgstr "15.1.5"
+
+msgid "15.1.5-28"
+msgstr "15.1.5-28"
+
+msgid "16.0.0"
+msgstr "16.0.0"
+
+msgid "16.0.1"
+msgstr "16.0.1"
+
+msgid "16.0.2"
+msgstr "16.0.2"
+
+msgid "16.0.3"
+msgstr "16.0.3"
+
+msgid "16.0.4"
+msgstr "16.0.4"
+
+msgid "16.1.0"
+msgstr "16.1.0"
+
+msgid "16.1.1"
+msgstr "16.1.1"
+
+msgid "16.1.2"
+msgstr "16.1.2"
+
+msgid "16.1.5"
+msgstr "16.1.5"
+
+msgid "16.1.7"
+msgstr "16.1.7"
+
+msgid "16.1.8"
+msgstr "16.1.8"
+
+msgid "16.1.8-57"
+msgstr "16.1.8-57"
+
+msgid "17.0.0"
+msgstr "17.0.0"
+
+msgid "17.0.10"
+msgstr "17.0.10"
+
+msgid "17.0.11"
+msgstr "17.0.11"
+
+msgid "17.0.12"
+msgstr "17.0.12"
+
+msgid "17.0.13"
+msgstr "17.0.13"
+
+msgid "17.0.13-73"
+msgstr "17.0.13-73"
+
+msgid "17.0.2"
+msgstr "17.0.2"
+
+msgid "17.0.3"
+msgstr "17.0.3"
+
+msgid "17.0.4"
+msgstr "17.0.4"
+
+msgid "17.0.5"
+msgstr "17.0.5"
+
+msgid "17.0.6"
+msgstr "17.0.6"
+
+msgid "17.0.8"
+msgstr "17.0.8"
+
+msgid "17.0.9"
+msgstr "17.0.9"
+
+msgid "18.0.0"
+msgstr "18.0.0"
+
+msgid "18.0.1"
+msgstr "18.0.1"
+
+msgid "18.0.3"
+msgstr "18.0.3"
+
+msgid "18.1.0"
+msgstr "18.1.0"
+
+msgid "18.2.0"
+msgstr "18.2.0"
+
+msgid "18.2.1"
+msgstr "18.2.1"
+
+msgid "18.2.2"
+msgstr "18.2.2"
+
+msgid "18.2.3"
+msgstr "18.2.3"
+
+msgid "18.3.0"
+msgstr "18.3.0"
+
+msgid "18.3.0-55"
+msgstr "18.3.0-55"
+
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "19.0.1"
+msgstr "19.0.1"
+
+msgid "19.0.2"
+msgstr "19.0.2"
+
+msgid "19.0.3"
+msgstr "19.0.3"
+
+msgid "19.1.0"
+msgstr "19.1.0"
+
+msgid "19.2.0"
+msgstr "19.2.0"
+
+msgid "19.3.0"
+msgstr "19.3.0"
+
+msgid "19.3.2"
+msgstr "19.3.2"
+
+msgid "19.3.2-19"
+msgstr "19.3.2-19"
+
+msgid "20.0.0"
+msgstr "20.0.0"
+
+msgid "20.1.0"
+msgstr "20.1.0"
+
+msgid "20.1.1"
+msgstr "20.1.1"
+
+msgid "20.2.0"
+msgstr "20.2.0"
+
+msgid "20.3.0"
+msgstr "20.3.0"
+
+msgid "20.4.0"
+msgstr "20.4.0"
+
+msgid "20.4.1"
+msgstr "20.4.1"
+
+msgid "20.5.0"
+msgstr "20.5.0"
+
+msgid "20.6.1"
+msgstr "20.6.1"
+
+msgid "20.6.1-29"
+msgstr "20.6.1-29"
+
+msgid "204 NoContent on success"
+msgstr "204 NoContent on success"
+
+msgid "21.0.0"
+msgstr "21.0.0"
+
+msgid "21.1.0"
+msgstr "21.1.0"
+
+msgid "21.1.1"
+msgstr "21.1.1"
+
+msgid "21.1.2"
+msgstr "21.1.2"
+
+msgid "21.2.0"
+msgstr "21.2.0"
+
+msgid "21.2.2"
+msgstr "21.2.2"
+
+msgid "21.2.3"
+msgstr "21.2.3"
+
+msgid "21.2.4-12"
+msgstr "21.2.4-12"
+
+msgid "22.0.0"
+msgstr "22.0.0"
+
+msgid "22.0.1"
+msgstr "22.0.1"
+
+msgid "22.1.0"
+msgstr "22.1.0"
+
+msgid "22.2.1"
+msgstr "22.2.1"
+
+msgid "22.2.2"
+msgstr "22.2.2"
+
+msgid "22.3.0"
+msgstr "22.3.0"
+
+msgid "22.4.0"
+msgstr "22.4.0"
+
+msgid "22.4.0-6"
+msgstr "22.4.0-6"
+
+msgid "23.0.0"
+msgstr "23.0.0"
+
+msgid "23.0.2"
+msgstr "23.0.2"
+
+msgid "23.1.0"
+msgstr "23.1.0"
+
+msgid "23.2.0"
+msgstr "23.2.0"
+
+msgid "23.2.1"
+msgstr "23.2.1"
+
+msgid "23.2.1-13"
+msgstr "23.2.1-13"
+
+msgid "24.0.0"
+msgstr "24.0.0"
+
+msgid "24.1.0"
+msgstr "24.1.0"
+
+msgid "24.1.1"
+msgstr "24.1.1"
+
+msgid "24.1.1-7"
+msgstr "24.1.1-7"
+
+msgid "25.0.0"
+msgstr "25.0.0"
+
+msgid "25.0.1"
+msgstr "25.0.1"
+
+msgid "25.0.1-5"
+msgstr "25.0.1-5"
+
+msgid "400 for unknown param for query param and for request body."
+msgstr "400 for unknown param for query param and for request body."
+
+msgid "404 NotFound for missing resource provider"
+msgstr "404 NotFound for missing resource provider"
+
+msgid "405 MethodNotAllowed if a microversion is specified that is before"
+msgstr "405 MethodNotAllowed if a microversion is specified that is before"
+
+msgid "409 Conflict if inventory in use or if some other request concurrently"
+msgstr "409 Conflict if inventory in use or if some other request concurrently"
+
+msgid ""
+"A ``default_floating_pool`` configuration option has been added in the "
+"``[neutron]`` group. The existing ``default_floating_pool`` option in the "
+"``[DEFAULT]`` group is retained and should be used by nova-network users. "
+"Neutron users meanwhile should migrate to the new option."
+msgstr ""
+"A ``default_floating_pool`` configuration option has been added in the "
+"``[neutron]`` group. The existing ``default_floating_pool`` option in the "
+"``[DEFAULT]`` group is retained and should be used by nova-network users. "
+"Neutron users meanwhile should migrate to the new option."
+
+msgid ""
+"A ``nova-manage db purge`` command to `purge archived shadow table data`_ is "
+"now available. A new ``--purge`` option is also available for the ``nova-"
+"manage db archive_deleted_rows`` command."
+msgstr ""
+"A ``nova-manage db purge`` command to `purge archived shadow table data`_ is "
+"now available. A new ``--purge`` option is also available for the ``nova-"
+"manage db archive_deleted_rows`` command."
+
+msgid ""
+"A ``nova-manage placement heal_allocations`` command is now available to "
+"allow users of the CachingScheduler to get the placement service populated "
+"for their eventual migration to the FilterScheduler. The CachingScheduler is "
+"deprecated and could be removed as early as Stein."
+msgstr ""
+"A ``nova-manage placement heal_allocations`` command is now available to "
+"allow users of the CachingScheduler to get the placement service populated "
+"for their eventual migration to the FilterScheduler. The CachingScheduler is "
+"deprecated and could be removed as early as Stein."
+
+msgid "A few examples of versioned notifications that use InstancePayload:"
+msgstr "A few examples of versioned notifications that use InstancePayload:"
+
+msgid "Current Series Release Notes"
+msgstr "Current Series Release Notes"
+
+msgid "Liberty Series Release Notes"
+msgstr "Liberty Series Release Notes"
+
+msgid "Mitaka Series Release Notes"
+msgstr "Mitaka Series Release Notes"
+
+msgid ""
+"New configuration option sync_power_state_pool_size has been added to set "
+"the number of greenthreads available for use to sync power states. Default "
+"value (1000) matches the previous implicit default value provided by "
+"Greenpool. This option can be used to reduce the number of concurrent "
+"requests made to the hypervisor or system with real instance power states "
+"for performance reasons."
+msgstr ""
+"New configuration option sync_power_state_pool_size has been added to set "
+"the number of greenthreads available for use to sync power states. The "
+"default value (1000) matches the previous implicit default value provided by "
+"Greenpool. This option can be used to reduce the number of concurrent "
+"requests made to the hypervisor or system with real instance power states "
+"for performance reasons."
+
+msgid "Newton Series Release Notes"
+msgstr "Newton Series Release Notes"
+
+msgid ""
+"Nova option 'use_usb_tablet' will be deprecated in favor of the global "
+"'pointer_model'."
+msgstr ""
+"Nova option 'use_usb_tablet' will be deprecated in favour of the global "
+"'pointer_model'."
+
+msgid "Ocata Series Release Notes"
+msgstr "Ocata Series Release Notes"
+
+msgid "Pike Series Release Notes"
+msgstr "Pike Series Release Notes"
+
+msgid "Queens Series Release Notes"
+msgstr "Queens Series Release Notes"
+
+msgid "Rocky Series Release Notes"
+msgstr "Rocky Series Release Notes"
+
+msgid "Stein Series Release Notes"
+msgstr "Stein Series Release Notes"
+
+msgid ""
+"The ``nova-manage vm list`` command is deprecated and will be removed in the "
+"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
+"instead."
+msgstr ""
+"The ``nova-manage vm list`` command is deprecated and will be removed in the "
+"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
+"instead."
+
+msgid ""
+"These commands only work with nova-network which is itself deprecated in "
+"favor of Neutron."
+msgstr ""
+"These commands only work with nova-network which is itself deprecated in "
+"favour of Neutron."
+
+msgid "Train Series Release Notes"
+msgstr "Train Series Release Notes"
+
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid ""
+"Virt drivers are no longer loaded with the import_object_ns function, which "
+"means that only virt drivers in the nova.virt namespace can be loaded."
+msgstr ""
+"Virt drivers are no longer loaded with the import_object_ns function, which "
+"means that only virt drivers in the nova.virt namespace can be loaded."
+
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
+msgid "Xena Series Release Notes"
+msgstr "Xena Series Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
+msgid "kernels 3.x: 8"
+msgstr "kernels 3.x: 8"
+
+msgid "kernels 4.x: 256"
+msgstr "kernels 4.x: 256"
+
+msgid "kernels prior to 3.0: 1"
+msgstr "kernels prior to 3.0: 1"
+
+msgid ""
+"network_allocate_retries config param now allows only positive integer "
+"values or 0."
+msgstr ""
+"network_allocate_retries config param now allows only positive integer "
+"values or 0."
+
+msgid "nova-maange account scrub"
+msgstr "nova-maange account scrub"
+
+msgid "nova-manage fixed *"
+msgstr "nova-manage fixed *"
+
+msgid "nova-manage floating *"
+msgstr "nova-manage floating *"
+
+msgid "nova-manage network *"
+msgstr "nova-manage network *"
+
+msgid "nova-manage project scrub"
+msgstr "nova-manage project scrub"
+
+msgid "nova-manage vpn *"
+msgstr "nova-manage vpn *"
+
+msgid "system_metadata"
+msgstr "system_metadata"
+
+msgid "tags (available in 2.26+)"
+msgstr "tags (available in 2.26+)"
+
+msgid "tags-any (available in 2.26+)"
+msgstr "tags-any (available in 2.26+)"
+
+msgid "task_state"
+msgstr "task_state"
+
+msgid "tenant_id"
+msgstr "tenant_id"
+
+msgid "terminated_at"
+msgstr "terminated_at"
+
+msgid "this change (1.5)"
+msgstr "this change (1.5)"
+
+msgid "total_bytes_sec"
+msgstr "total_bytes_sec"
+
+msgid "total_iops_sec - normalized IOPS"
+msgstr "total_iops_sec - normalised IOPS"
+
+msgid "trait:HW_CPU_X86_AVX2=required"
+msgstr "trait:HW_CPU_X86_AVX2=required"
+
+msgid "trait:STORAGE_DISK_SSD=required"
+msgstr "trait:STORAGE_DISK_SSD=required"
+
+msgid "updated_at"
+msgstr "updated_at"
+
+msgid "updates this resource provider"
+msgstr "updates this resource provider"
+
+msgid "user_id"
+msgstr "user_id"
+
+msgid "uuid"
+msgstr "UUID"
+
+msgid "vm_state"
+msgstr "vm_state"
diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
new file mode 100644
index 0000000000..eece7a459c
--- /dev/null
+++ b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
@@ -0,0 +1,126 @@
+# Gérald LONLAS <g.lonlas@gmail.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2022-09-16 12:59+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-10-22 05:56+0000\n"
+"Last-Translator: Gérald LONLAS <g.lonlas@gmail.com>\n"
+"Language-Team: French\n"
+"Language: fr\n"
+"X-Generator: Zanata 4.3.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+
+msgid "/images"
+msgstr "/images"
+
+msgid "/os-baremetal-nodes"
+msgstr "/os-baremetal-nodes"
+
+msgid "/os-fixed-ips"
+msgstr "/os-fixed-ips"
+
+msgid "/os-floating-ip-dns"
+msgstr "/os-floating-ip-dns"
+
+msgid "/os-floating-ip-pools"
+msgstr "/os-floating-ip-pools"
+
+msgid "/os-floating-ips"
+msgstr "/os-floating-ips"
+
+msgid "/os-floating-ips-bulk"
+msgstr "/os-floating-ips-bulk"
+
+msgid "/os-fping"
+msgstr "/os-fping"
+
+msgid "/os-networks"
+msgstr "/os-networks"
+
+msgid "/os-security-group-default-rules"
+msgstr "/os-security-group-default-rules"
+
+msgid "/os-security-group-rules"
+msgstr "/os-security-group-rules"
+
+msgid "/os-security-groups"
+msgstr "/os-security-groups"
+
+msgid "/os-snapshots"
+msgstr "/os-snapshots"
+
+msgid "/os-volumes"
+msgstr "/os-volumes"
+
+msgid "12.0.1"
+msgstr "12.0.1"
+
+msgid "12.0.3"
+msgstr "12.0.3"
+
+msgid "12.0.4"
+msgstr "12.0.4"
+
+msgid "12.0.5"
+msgstr "12.0.5"
+
+msgid "13.0.0"
+msgstr "13.0.0"
+
+msgid "13.1.0"
+msgstr "13.1.0"
+
+msgid "13.1.1"
+msgstr "13.1.1"
+
+msgid "13.1.2"
+msgstr "13.1.2"
+
+msgid "14.0.0"
+msgstr "14.0.0"
+
+msgid "14.0.1"
+msgstr "14.0.1"
+
+msgid "Bug Fixes"
+msgstr "Corrections de bugs"
+
+msgid "Current Series Release Notes"
+msgstr "Note de la release actuelle"
+
+msgid "DEFAULT.debug"
+msgstr "DEFAULT.debug"
+
+msgid "Deprecation Notes"
+msgstr "Notes dépréciées "
+
+msgid "Known Issues"
+msgstr "Problèmes connus"
+
+msgid "Liberty Series Release Notes"
+msgstr "Note de release pour Liberty"
+
+msgid "Mitaka Series Release Notes"
+msgstr "Note de release pour Mitaka"
+
+msgid "New Features"
+msgstr "Nouvelles fonctionnalités"
+
+msgid "Newton Series Release Notes"
+msgstr "Note de release pour Newton"
+
+msgid "Other Notes"
+msgstr "Autres notes"
+
+msgid "Security Issues"
+msgstr "Problèmes de sécurités"
+
+msgid "Start using reno to manage release notes."
+msgstr "Commence à utiliser reno pour la gestion des notes de release"
+
+msgid "Upgrade Notes"
+msgstr "Notes de mises à jours"
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
new file mode 100644
index 0000000000..9608c05e45
--- /dev/null
+++ b/releasenotes/source/zed.rst
@@ -0,0 +1,6 @@
+========================
+Zed Series Release Notes
+========================
+
+.. release-notes::
+ :branch: stable/zed
diff --git a/requirements.txt b/requirements.txt
index a0fcc1195c..9954d06bc9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -31,7 +31,7 @@ requests>=2.25.1 # Apache-2.0
stevedore>=1.20.0 # Apache-2.0
websockify>=0.9.0 # LGPLv3
oslo.cache>=1.26.0 # Apache-2.0
-oslo.concurrency>=4.5.0 # Apache-2.0
+oslo.concurrency>=5.0.1 # Apache-2.0
oslo.config>=8.6.0 # Apache-2.0
oslo.context>=3.4.0 # Apache-2.0
oslo.log>=4.6.1 # Apache-2.0
@@ -42,8 +42,8 @@ oslo.upgradecheck>=1.3.0
oslo.utils>=4.12.1 # Apache-2.0
oslo.db>=10.0.0 # Apache-2.0
oslo.rootwrap>=5.15.0 # Apache-2.0
-oslo.messaging>=10.3.0 # Apache-2.0
-oslo.policy>=3.7.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
+oslo.policy>=3.11.0 # Apache-2.0
oslo.privsep>=2.6.2 # Apache-2.0
oslo.i18n>=5.1.0 # Apache-2.0
oslo.service>=2.8.0 # Apache-2.0
@@ -53,8 +53,8 @@ psutil>=3.2.2 # BSD
oslo.versionedobjects>=1.35.0 # Apache-2.0
os-brick>=5.2 # Apache-2.0
os-resource-classes>=1.1.0 # Apache-2.0
-os-traits>=2.7.0 # Apache-2.0
-os-vif>=1.15.2 # Apache-2.0
+os-traits>=2.9.0 # Apache-2.0
+os-vif>=3.1.0 # Apache-2.0
castellan>=0.16.0 # Apache-2.0
microversion-parse>=0.2.1 # Apache-2.0
tooz>=1.58.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 58594e229c..fa6f6af656 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,7 @@ classifiers =
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 96a08c4a98..fced9be5e0 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -27,13 +27,19 @@ function is_rhel8 {
cat /etc/*release | grep -q 'release 8'
}
+function is_rhel9 {
+ [ -f /usr/bin/dnf ] && \
+ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+ cat /etc/*release | grep -q 'release 9'
+}
+
function set_conf_line { # file regex value
sudo sh -c "grep -q -e '$2' $1 && \
sed -i 's|$2|$3|g' $1 || \
echo '$3' >> $1"
}
-if is_rhel7 || is_rhel8; then
+if is_rhel7 || is_rhel8 || is_rhel9; then
# mysql needs to be started on centos/rhel
sudo systemctl restart mariadb.service
diff --git a/tox.ini b/tox.ini
index 24e9e894cf..097edbe827 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,13 +1,8 @@
[tox]
minversion = 3.18.0
-envlist = py39,functional,pep8
-# Automatic envs (pyXX) will only use the python version appropriate to that
-# env and ignore basepython inherited from [testenv] if we set
-# ignore_basepython_conflict.
-ignore_basepython_conflict = True
+envlist = py3,functional,pep8
[testenv]
-basepython = python3
usedevelop = True
allowlist_externals =
bash
@@ -15,6 +10,7 @@ allowlist_externals =
rm
env
make
+install_command = python -I -m pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
LANGUAGE=en_US
@@ -26,15 +22,19 @@ setenv =
# TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
SQLALCHEMY_WARN_20=1
deps =
- -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
extras =
zvm
hyperv
vmware
passenv =
- OS_DEBUG GENERATE_HASHES
+ OS_DEBUG
+ GENERATE_HASHES
+# NOTE(sean-k-mooney) optimization is enabled by default and when enabled
+# asserts are compiled out. Disable optimization to allow asserts in
+# nova to fire in unit and functional tests. This can be useful for
+# debugging issues with fixtures and mocks.
+ PYTHONOPTIMIZE
# there is also secret magic in subunit-trace which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environmental variable.
commands =
@@ -61,7 +61,7 @@ description =
# because we do not want placement present during unit tests.
deps =
{[testenv]deps}
- openstack-placement>=1.0.0
+ openstack-placement>=9.0.0.0b1
extras =
commands =
stestr --test-path=./nova/tests/functional run {posargs}
@@ -345,6 +345,7 @@ extension =
N369 = checks:check_lockutils_rwlocks
N370 = checks:check_six
N371 = checks:import_stock_mock
+ N372 = checks:check_set_daemon
paths =
./nova/hacking