summaryrefslogtreecommitdiff
path: root/nova/tests/unit
diff options
context:
space:
mode:
Diffstat (limited to 'nova/tests/unit')
-rw-r--r--nova/tests/unit/accelerator/test_cyborg.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/admin_only_action_common.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_admin_password.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_aggregates.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_attach_interfaces.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_availability_zone.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py5
-rw-r--r--nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_console_output.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_create_backup.py7
-rw-r--r--nova/tests/unit/api/openstack/compute/test_deferred_delete.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_disk_config.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_evacuate.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_access.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_manage.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavors.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_floating_ips.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hosts.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hypervisors.py396
-rw-r--r--nova/tests/unit/api/openstack/compute/test_image_metadata.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_images.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_instance_actions.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_keypairs.py128
-rw-r--r--nova/tests/unit/api/openstack/compute/test_limits.py166
-rw-r--r--nova/tests/unit/api/openstack/compute/test_lock_server.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_microversions.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrate_server.py6
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrations.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_multinic.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_quota_classes.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_quotas.py8
-rw-r--r--nova/tests/unit/api/openstack/compute/test_remote_consoles.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_rescue.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_security_groups.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_actions.py41
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_diagnostics.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_external_events.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_group_quotas.py10
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_groups.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_metadata.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_migrations.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_password.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_reset_state.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_start_stop.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_tags.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_topology.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_servers.py33
-rw-r--r--nova/tests/unit/api/openstack/compute/test_services.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_shelve.py258
-rw-r--r--nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_snapshots.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_suspend_server.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_tenant_networks.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_volumes.py12
-rw-r--r--nova/tests/unit/api/openstack/test_common.py3
-rw-r--r--nova/tests/unit/api/openstack/test_faults.py3
-rw-r--r--nova/tests/unit/api/openstack/test_requestlog.py2
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi.py3
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi_app.py17
-rw-r--r--nova/tests/unit/api/test_auth.py3
-rw-r--r--nova/tests/unit/api/test_wsgi.py2
-rw-r--r--nova/tests/unit/api/validation/extra_specs/test_validators.py11
-rw-r--r--nova/tests/unit/cmd/test_baseproxy.py2
-rw-r--r--nova/tests/unit/cmd/test_common.py2
-rw-r--r--nova/tests/unit/cmd/test_compute.py2
-rw-r--r--nova/tests/unit/cmd/test_manage.py2
-rw-r--r--nova/tests/unit/cmd/test_nova_api.py2
-rw-r--r--nova/tests/unit/cmd/test_policy.py6
-rw-r--r--nova/tests/unit/cmd/test_scheduler.py2
-rw-r--r--nova/tests/unit/cmd/test_status.py58
-rw-r--r--nova/tests/unit/compute/monitors/cpu/test_virt_driver.py2
-rw-r--r--nova/tests/unit/compute/monitors/test_monitors.py2
-rw-r--r--nova/tests/unit/compute/test_api.py283
-rw-r--r--nova/tests/unit/compute/test_claims.py2
-rw-r--r--nova/tests/unit/compute/test_compute.py258
-rw-r--r--nova/tests/unit/compute/test_compute_mgr.py352
-rw-r--r--nova/tests/unit/compute/test_host_api.py3
-rw-r--r--nova/tests/unit/compute/test_instance_list.py3
-rw-r--r--nova/tests/unit/compute/test_keypairs.py21
-rw-r--r--nova/tests/unit/compute/test_multi_cell_list.py3
-rw-r--r--nova/tests/unit/compute/test_pci_placement_translator.py291
-rw-r--r--nova/tests/unit/compute/test_resource_tracker.py300
-rw-r--r--nova/tests/unit/compute/test_rpcapi.py46
-rw-r--r--nova/tests/unit/compute/test_shelve.py628
-rw-r--r--nova/tests/unit/compute/test_utils.py2
-rw-r--r--nova/tests/unit/compute/test_virtapi.py22
-rw-r--r--nova/tests/unit/conductor/tasks/test_base.py2
-rw-r--r--nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py2
-rw-r--r--nova/tests/unit/conductor/tasks/test_live_migrate.py33
-rw-r--r--nova/tests/unit/conductor/tasks/test_migrate.py3
-rw-r--r--nova/tests/unit/conductor/test_conductor.py76
-rw-r--r--nova/tests/unit/console/rfb/test_auth.py2
-rw-r--r--nova/tests/unit/console/rfb/test_authnone.py2
-rw-r--r--nova/tests/unit/console/rfb/test_authvencrypt.py2
-rw-r--r--nova/tests/unit/console/securityproxy/test_rfb.py2
-rw-r--r--nova/tests/unit/console/test_serial.py3
-rw-r--r--nova/tests/unit/console/test_websocketproxy.py63
-rw-r--r--nova/tests/unit/db/api/test_api.py2
-rw-r--r--nova/tests/unit/db/api/test_migrations.py3
-rw-r--r--nova/tests/unit/db/main/test_api.py20
-rw-r--r--nova/tests/unit/db/main/test_migrations.py36
-rw-r--r--nova/tests/unit/db/test_migration.py2
-rw-r--r--nova/tests/unit/fixtures/test_libvirt.py3
-rw-r--r--nova/tests/unit/image/test_glance.py2
-rw-r--r--nova/tests/unit/limit/test_local.py3
-rw-r--r--nova/tests/unit/limit/test_placement.py2
-rw-r--r--nova/tests/unit/network/test_neutron.py212
-rw-r--r--nova/tests/unit/network/test_security_group.py3
-rw-r--r--nova/tests/unit/notifications/objects/test_flavor.py2
-rw-r--r--nova/tests/unit/notifications/objects/test_instance.py3
-rw-r--r--nova/tests/unit/notifications/objects/test_notification.py4
-rw-r--r--nova/tests/unit/notifications/objects/test_service.py2
-rw-r--r--nova/tests/unit/notifications/test_base.py2
-rw-r--r--nova/tests/unit/objects/test_aggregate.py3
-rw-r--r--nova/tests/unit/objects/test_block_device.py16
-rw-r--r--nova/tests/unit/objects/test_build_request.py3
-rw-r--r--nova/tests/unit/objects/test_cell_mapping.py3
-rw-r--r--nova/tests/unit/objects/test_compute_node.py14
-rw-r--r--nova/tests/unit/objects/test_console_auth_token.py2
-rw-r--r--nova/tests/unit/objects/test_ec2.py3
-rw-r--r--nova/tests/unit/objects/test_external_event.py2
-rw-r--r--nova/tests/unit/objects/test_fields.py6
-rw-r--r--nova/tests/unit/objects/test_flavor.py2
-rw-r--r--nova/tests/unit/objects/test_host_mapping.py3
-rw-r--r--nova/tests/unit/objects/test_image_meta.py68
-rw-r--r--nova/tests/unit/objects/test_instance.py164
-rw-r--r--nova/tests/unit/objects/test_instance_action.py2
-rw-r--r--nova/tests/unit/objects/test_instance_device_metadata.py3
-rw-r--r--nova/tests/unit/objects/test_instance_fault.py3
-rw-r--r--nova/tests/unit/objects/test_instance_group.py2
-rw-r--r--nova/tests/unit/objects/test_instance_info_cache.py2
-rw-r--r--nova/tests/unit/objects/test_instance_mapping.py3
-rw-r--r--nova/tests/unit/objects/test_instance_numa.py3
-rw-r--r--nova/tests/unit/objects/test_instance_pci_requests.py3
-rw-r--r--nova/tests/unit/objects/test_keypair.py3
-rw-r--r--nova/tests/unit/objects/test_migrate_data.py61
-rw-r--r--nova/tests/unit/objects/test_migration.py3
-rw-r--r--nova/tests/unit/objects/test_migration_context.py3
-rw-r--r--nova/tests/unit/objects/test_objects.py28
-rw-r--r--nova/tests/unit/objects/test_pci_device.py2
-rw-r--r--nova/tests/unit/objects/test_quotas.py2
-rw-r--r--nova/tests/unit/objects/test_request_spec.py249
-rw-r--r--nova/tests/unit/objects/test_resource.py3
-rw-r--r--nova/tests/unit/objects/test_security_group.py3
-rw-r--r--nova/tests/unit/objects/test_service.py3
-rw-r--r--nova/tests/unit/objects/test_tag.py2
-rw-r--r--nova/tests/unit/objects/test_task_log.py2
-rw-r--r--nova/tests/unit/objects/test_trusted_certs.py2
-rw-r--r--nova/tests/unit/objects/test_virtual_interface.py3
-rw-r--r--nova/tests/unit/objects/test_volume_usage.py3
-rw-r--r--nova/tests/unit/pci/fakes.py2
-rw-r--r--nova/tests/unit/pci/test_devspec.py51
-rw-r--r--nova/tests/unit/pci/test_manager.py44
-rw-r--r--nova/tests/unit/pci/test_request.py18
-rw-r--r--nova/tests/unit/pci/test_stats.py563
-rw-r--r--nova/tests/unit/pci/test_utils.py2
-rw-r--r--nova/tests/unit/policies/base.py42
-rw-r--r--nova/tests/unit/policies/test_admin_actions.py16
-rw-r--r--nova/tests/unit/policies/test_admin_password.py17
-rw-r--r--nova/tests/unit/policies/test_aggregates.py30
-rw-r--r--nova/tests/unit/policies/test_assisted_volume_snapshots.py3
-rw-r--r--nova/tests/unit/policies/test_attach_interfaces.py45
-rw-r--r--nova/tests/unit/policies/test_availability_zone.py17
-rw-r--r--nova/tests/unit/policies/test_baremetal_nodes.py16
-rw-r--r--nova/tests/unit/policies/test_console_auth_tokens.py2
-rw-r--r--nova/tests/unit/policies/test_console_output.py17
-rw-r--r--nova/tests/unit/policies/test_create_backup.py17
-rw-r--r--nova/tests/unit/policies/test_deferred_delete.py25
-rw-r--r--nova/tests/unit/policies/test_evacuate.py16
-rw-r--r--nova/tests/unit/policies/test_extensions.py10
-rw-r--r--nova/tests/unit/policies/test_flavor_access.py20
-rw-r--r--nova/tests/unit/policies/test_flavor_extra_specs.py34
-rw-r--r--nova/tests/unit/policies/test_flavor_manage.py8
-rw-r--r--nova/tests/unit/policies/test_floating_ip_pools.py12
-rw-r--r--nova/tests/unit/policies/test_floating_ips.py41
-rw-r--r--nova/tests/unit/policies/test_hosts.py19
-rw-r--r--nova/tests/unit/policies/test_hypervisors.py21
-rw-r--r--nova/tests/unit/policies/test_instance_actions.py38
-rw-r--r--nova/tests/unit/policies/test_instance_usage_audit_log.py5
-rw-r--r--nova/tests/unit/policies/test_keypairs.py17
-rw-r--r--nova/tests/unit/policies/test_limits.py7
-rw-r--r--nova/tests/unit/policies/test_lock_server.py21
-rw-r--r--nova/tests/unit/policies/test_migrate_server.py14
-rw-r--r--nova/tests/unit/policies/test_migrations.py2
-rw-r--r--nova/tests/unit/policies/test_multinic.py25
-rw-r--r--nova/tests/unit/policies/test_networks.py11
-rw-r--r--nova/tests/unit/policies/test_pause_server.py17
-rw-r--r--nova/tests/unit/policies/test_quota_class_sets.py15
-rw-r--r--nova/tests/unit/policies/test_quota_sets.py37
-rw-r--r--nova/tests/unit/policies/test_remote_consoles.py17
-rw-r--r--nova/tests/unit/policies/test_rescue.py25
-rw-r--r--nova/tests/unit/policies/test_security_groups.py76
-rw-r--r--nova/tests/unit/policies/test_server_diagnostics.py14
-rw-r--r--nova/tests/unit/policies/test_server_external_events.py3
-rw-r--r--nova/tests/unit/policies/test_server_groups.py36
-rw-r--r--nova/tests/unit/policies/test_server_ips.py21
-rw-r--r--nova/tests/unit/policies/test_server_metadata.py29
-rw-r--r--nova/tests/unit/policies/test_server_migrations.py15
-rw-r--r--nova/tests/unit/policies/test_server_password.py37
-rw-r--r--nova/tests/unit/policies/test_server_tags.py29
-rw-r--r--nova/tests/unit/policies/test_server_topology.py22
-rw-r--r--nova/tests/unit/policies/test_servers.py39
-rw-r--r--nova/tests/unit/policies/test_services.py15
-rw-r--r--nova/tests/unit/policies/test_shelve.py19
-rw-r--r--nova/tests/unit/policies/test_simple_tenant_usage.py21
-rw-r--r--nova/tests/unit/policies/test_suspend_server.py17
-rw-r--r--nova/tests/unit/policies/test_tenant_networks.py11
-rw-r--r--nova/tests/unit/policies/test_volumes.py77
-rw-r--r--nova/tests/unit/privsep/test_fs.py2
-rw-r--r--nova/tests/unit/privsep/test_idmapshift.py2
-rw-r--r--nova/tests/unit/privsep/test_libvirt.py3
-rw-r--r--nova/tests/unit/privsep/test_linux_net.py2
-rw-r--r--nova/tests/unit/privsep/test_path.py3
-rw-r--r--nova/tests/unit/privsep/test_qemu.py2
-rw-r--r--nova/tests/unit/privsep/test_utils.py2
-rw-r--r--nova/tests/unit/scheduler/client/test_query.py3
-rw-r--r--nova/tests/unit/scheduler/client/test_report.py68
-rw-r--r--nova/tests/unit/scheduler/filters/test_affinity_filters.py3
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_availability_zone_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_io_ops_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_num_instances_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_type_filters.py2
-rw-r--r--nova/tests/unit/scheduler/test_filters.py2
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py2
-rw-r--r--nova/tests/unit/scheduler/test_manager.py871
-rw-r--r--nova/tests/unit/scheduler/test_request_filter.py89
-rw-r--r--nova/tests/unit/scheduler/test_rpcapi.py3
-rw-r--r--nova/tests/unit/scheduler/test_utils.py3
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_affinity.py2
-rw-r--r--nova/tests/unit/servicegroup/test_api.py2
-rw-r--r--nova/tests/unit/servicegroup/test_db_servicegroup.py3
-rw-r--r--nova/tests/unit/servicegroup/test_mc_servicegroup.py2
-rw-r--r--nova/tests/unit/storage/test_rbd.py4
-rw-r--r--nova/tests/unit/test_availability_zones.py3
-rw-r--r--nova/tests/unit/test_block_device.py3
-rw-r--r--nova/tests/unit/test_cache.py2
-rw-r--r--nova/tests/unit/test_cinder.py2
-rw-r--r--nova/tests/unit/test_conf.py2
-rw-r--r--nova/tests/unit/test_configdrive2.py2
-rw-r--r--nova/tests/unit/test_context.py3
-rw-r--r--nova/tests/unit/test_crypto.py2
-rw-r--r--nova/tests/unit/test_exception_wrapper.py2
-rw-r--r--nova/tests/unit/test_fixtures.py2
-rw-r--r--nova/tests/unit/test_hacking.py15
-rw-r--r--nova/tests/unit/test_identity.py2
-rw-r--r--nova/tests/unit/test_json_ref.py2
-rw-r--r--nova/tests/unit/test_metadata.py25
-rw-r--r--nova/tests/unit/test_notifications.py2
-rw-r--r--nova/tests/unit/test_notifier.py2
-rw-r--r--nova/tests/unit/test_policy.py6
-rw-r--r--nova/tests/unit/test_quota.py3
-rw-r--r--nova/tests/unit/test_rpc.py3
-rw-r--r--nova/tests/unit/test_service.py3
-rw-r--r--nova/tests/unit/test_service_auth.py3
-rw-r--r--nova/tests/unit/test_test.py17
-rw-r--r--nova/tests/unit/test_utils.py2
-rw-r--r--nova/tests/unit/test_weights.py2
-rw-r--r--nova/tests/unit/test_wsgi.py2
-rw-r--r--nova/tests/unit/utils.py3
-rw-r--r--nova/tests/unit/virt/disk/mount/test_api.py3
-rw-r--r--nova/tests/unit/virt/disk/mount/test_loop.py3
-rw-r--r--nova/tests/unit/virt/disk/mount/test_nbd.py2
-rw-r--r--nova/tests/unit/virt/disk/test_api.py2
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_guestfs.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_base.py3
-rw-r--r--nova/tests/unit/virt/hyperv/test_block_device_manager.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_driver.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_eventhandler.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_hostops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_imagecache.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_livemigrationops.py3
-rw-r--r--nova/tests/unit/virt/hyperv/test_migrationops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_pathutils.py3
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_serialconsolehandler.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_serialconsoleops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_serialproxy.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_snapshotops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_vif.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmops.py19
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeops.py3
-rw-r--r--nova/tests/unit/virt/ironic/test_client_wrapper.py3
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py76
-rw-r--r--nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py3
-rw-r--r--nova/tests/unit/virt/libvirt/storage/test_lvm.py3
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py102
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py94
-rw-r--r--nova/tests/unit/virt/libvirt/test_designer.py2
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py901
-rw-r--r--nova/tests/unit/virt/libvirt/test_guest.py39
-rw-r--r--nova/tests/unit/virt/libvirt/test_host.py23
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py38
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagecache.py2
-rw-r--r--nova/tests/unit/virt/libvirt/test_machine_type_utils.py3
-rw-r--r--nova/tests/unit/virt/libvirt/test_migration.py2
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py76
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py37
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_fs.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_iscsi.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_lightos.py23
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_mount.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_net.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_nfs.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_nvme.py19
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_quobyte.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_remotefs.py3
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_scaleio.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_smbfs.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_storpool.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_volume.py3
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_vzstorage.py2
-rw-r--r--nova/tests/unit/virt/powervm/__init__.py65
-rw-r--r--nova/tests/unit/virt/powervm/disk/__init__.py0
-rw-r--r--nova/tests/unit/virt/powervm/disk/fake_adapter.py52
-rw-r--r--nova/tests/unit/virt/powervm/disk/test_driver.py59
-rw-r--r--nova/tests/unit/virt/powervm/disk/test_localdisk.py312
-rw-r--r--nova/tests/unit/virt/powervm/disk/test_ssp.py424
-rw-r--r--nova/tests/unit/virt/powervm/tasks/__init__.py0
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_image.py68
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_network.py323
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_storage.py354
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_vm.py134
-rw-r--r--nova/tests/unit/virt/powervm/test_driver.py649
-rw-r--r--nova/tests/unit/virt/powervm/test_host.py62
-rw-r--r--nova/tests/unit/virt/powervm/test_image.py55
-rw-r--r--nova/tests/unit/virt/powervm/test_media.py203
-rw-r--r--nova/tests/unit/virt/powervm/test_mgmt.py193
-rw-r--r--nova/tests/unit/virt/powervm/test_vif.py327
-rw-r--r--nova/tests/unit/virt/powervm/test_vm.py563
-rw-r--r--nova/tests/unit/virt/powervm/volume/__init__.py0
-rw-r--r--nova/tests/unit/virt/powervm/volume/test_fcvscsi.py456
-rw-r--r--nova/tests/unit/virt/test_block_device.py316
-rw-r--r--nova/tests/unit/virt/test_hardware.py163
-rw-r--r--nova/tests/unit/virt/test_imagecache.py3
-rw-r--r--nova/tests/unit/virt/test_images.py2
-rw-r--r--nova/tests/unit/virt/test_netutils.py23
-rw-r--r--nova/tests/unit/virt/test_osinfo.py3
-rw-r--r--nova/tests/unit/virt/test_virt.py29
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_configdrive.py3
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py6
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_imagecache.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_images.py10
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_network_util.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_session.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vif.py3
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vm_util.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vmops.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_volumeops.py3
-rw-r--r--nova/tests/unit/virt/zvm/test_driver.py3
-rw-r--r--nova/tests/unit/virt/zvm/test_guest.py2
-rw-r--r--nova/tests/unit/virt/zvm/test_hypervisor.py2
-rw-r--r--nova/tests/unit/virt/zvm/test_utils.py2
-rw-r--r--nova/tests/unit/volume/test_cinder.py3
363 files changed, 7306 insertions, 6865 deletions
diff --git a/nova/tests/unit/accelerator/test_cyborg.py b/nova/tests/unit/accelerator/test_cyborg.py
index 100f1a2115..2d814c74a1 100644
--- a/nova/tests/unit/accelerator/test_cyborg.py
+++ b/nova/tests/unit/accelerator/test_cyborg.py
@@ -13,7 +13,7 @@
# under the License.
import itertools
-import mock
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
from requests.models import Response
diff --git a/nova/tests/unit/api/openstack/compute/admin_only_action_common.py b/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
index 37fd1012b7..f332d9f32f 100644
--- a/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
+++ b/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_admin_password.py b/nova/tests/unit/api/openstack/compute/test_admin_password.py
index 90a4a2983b..67e4c743d5 100644
--- a/nova/tests/unit/api/openstack/compute/test_admin_password.py
+++ b/nova/tests/unit/api/openstack/compute/test_admin_password.py
@@ -13,7 +13,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+
+from unittest import mock
+
import webob
from nova.api.openstack.compute import admin_password as admin_password_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_aggregates.py b/nova/tests/unit/api/openstack/compute/test_aggregates.py
index fb096861eb..21d644f0be 100644
--- a/nova/tests/unit/api/openstack/compute/test_aggregates.py
+++ b/nova/tests/unit/api/openstack/compute/test_aggregates.py
@@ -15,7 +15,8 @@
"""Tests for the aggregates admin api."""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
index 526cb6011d..e4719ea052 100644
--- a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
+++ b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_availability_zone.py b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
index f355eb436a..a408e0d1aa 100644
--- a/nova/tests/unit/api/openstack/compute/test_availability_zone.py
+++ b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
index 55a8b03216..c8ad907b10 100644
--- a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
+++ b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
@@ -13,13 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from ironicclient import exc as ironic_exc
-import mock
from webob import exc
-from nova.api.openstack.compute import baremetal_nodes \
- as b_nodes_v21
+from nova.api.openstack.compute import baremetal_nodes as b_nodes_v21
from nova import context
from nova import exception
from nova import test
diff --git a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
index 429096d51d..a1f3d1e63d 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import webob
from nova.api.openstack import api_version_request
diff --git a/nova/tests/unit/api/openstack/compute/test_console_output.py b/nova/tests/unit/api/openstack/compute/test_console_output.py
index 1a76a445fc..a9dc830255 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_output.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_output.py
@@ -14,8 +14,8 @@
# under the License.
import string
+from unittest import mock
-import mock
import webob
from nova.api.openstack.compute import console_output \
diff --git a/nova/tests/unit/api/openstack/compute/test_create_backup.py b/nova/tests/unit/api/openstack/compute/test_create_backup.py
index f7280a5a37..9728002e88 100644
--- a/nova/tests/unit/api/openstack/compute/test_create_backup.py
+++ b/nova/tests/unit/api/openstack/compute/test_create_backup.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
import webob
@@ -40,10 +41,6 @@ class CreateBackupTestsV21(admin_only_action_common.CommonMixin,
self.controller = getattr(self.create_backup, self.controller_name)()
self.compute_api = self.controller.compute_api
- patch_get = mock.patch.object(self.compute_api, 'get')
- self.mock_get = patch_get.start()
- self.addCleanup(patch_get.stop)
-
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_with_metadata(self, mock_backup, mock_check_image):
diff --git a/nova/tests/unit/api/openstack/compute/test_deferred_delete.py b/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
index db6f774c51..8a1c8efd57 100644
--- a/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
+++ b/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import deferred_delete as dd_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_disk_config.py b/nova/tests/unit/api/openstack/compute/test_disk_config.py
index bf3be1d0a3..c5ee59722a 100644
--- a/nova/tests/unit/api/openstack/compute/test_disk_config.py
+++ b/nova/tests/unit/api/openstack/compute/test_disk_config.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from nova.api.openstack import compute
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index 6620d7a180..fb7f7662d8 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 8c25a2efc2..0581a47c84 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from webob import exc
from nova.api.openstack import api_version_request as api_version
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
index f8412c772c..948f255f34 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors.py b/nova/tests/unit/api/openstack/compute/test_flavors.py
index 4390b32012..c7fbf5c468 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors.py
@@ -13,9 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from urllib import parse as urlparse
-import mock
import webob
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
index e68bf7e306..8355ce59b5 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import testtools
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py b/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
index e25302ee9a..71ca209672 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import floating_ip_pools \
as fipp_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ips.py b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
index 2cb89dfe76..7093c5a80d 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ips.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_hosts.py b/nova/tests/unit/api/openstack/compute/test_hosts.py
index 7adc698093..f1cdde2917 100644
--- a/nova/tests/unit/api/openstack/compute/test_hosts.py
+++ b/nova/tests/unit/api/openstack/compute/test_hosts.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
import testtools
import webob.exc
diff --git a/nova/tests/unit/api/openstack/compute/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
index facc5389be..a908988811 100644
--- a/nova/tests/unit/api/openstack/compute/test_hypervisors.py
+++ b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -368,25 +368,23 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_index_compute_host_not_mapped(self):
"""Tests that we don't fail index if a host is not mapped."""
@@ -402,25 +400,22 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ self.controller.host_api.service_get_by_compute_host = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_detail(self):
req = self._get_request(True)
@@ -444,32 +439,30 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_detail_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
@@ -487,32 +480,28 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_show(self):
req = self._get_request(True)
@@ -525,21 +514,16 @@ class HypervisorsTestV21(test.NoDBTestCase):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
-
- @mock.patch.object(self.controller.host_api, 'compute_node_get',
- return_value=self.TEST_HYPERS_OBJ[0])
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(self, mock_service, mock_compute_node_get):
- req = self._get_request(True)
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound, self.controller.show,
- req, hyper_id)
- self.assertTrue(mock_service.called)
- mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.show, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
+ self.controller.host_api.compute_node_get.assert_called_once_with(
+ mock.ANY, hyper_id)
def test_show_noid(self):
req = self._get_request(True)
@@ -611,20 +595,15 @@ class HypervisorsTestV21(test.NoDBTestCase):
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_uptime_hypervisor_not_mapped_service_get(self):
- @mock.patch.object(self.controller.host_api, 'compute_node_get')
- @mock.patch.object(self.controller.host_api, 'get_host_uptime')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- side_effect=exception.HostMappingNotFound(
- name='dummy'))
- def _test(mock_get, _, __):
- req = self._get_request(True)
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound,
- self.controller.uptime, req, hyper_id)
- self.assertTrue(mock_get.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='dummy'))
- _test()
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.uptime, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
def test_uptime_hypervisor_not_mapped(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
@@ -644,30 +623,26 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_search_non_exist(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertEqual(1, m_search.call_count)
def test_search_unmapped(self):
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = [mock.MagicMock()]
- @mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(mock_service, mock_search):
- mock_search.return_value = [mock.MagicMock()]
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertTrue(mock_service.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
- _test()
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
@@ -702,15 +677,12 @@ class HypervisorsTestV21(test.NoDBTestCase):
def test_servers_compute_host_not_found(self):
req = self._get_request(True)
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+ with mock.patch.object(
+ self.controller.host_api,
+ 'instance_get_all_by_host',
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -718,24 +690,25 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual({'hypervisors': []}, result)
def test_servers_non_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers,
- req, '115')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.servers,
+ req, '115')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_non_integer_hypervisor_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers, req, 'abc')
- self.assertEqual(1, mock_node_search.call_count)
+ req = self._get_request(True)
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.servers, req, 'abc')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_no_servers(self):
with mock.patch.object(self.controller.host_api,
@@ -1089,15 +1062,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=1')
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+
+ with mock.patch.object(
+ self.controller.host_api,
+ "instance_get_all_by_host",
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -1157,11 +1128,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=yes&'
'hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList()) as s:
- self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList()
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
def test_detail_with_hostname_pattern(self):
"""Test listing hypervisors with details and using the
@@ -1170,13 +1143,14 @@ class HypervisorsTestV253(HypervisorsTestV252):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(
- self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList(objects=[TEST_HYPERS_OBJ[0]])
- ) as s:
- result = self.controller.detail(req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList(
+ objects=[TEST_HYPERS_OBJ[0]])
+
+ result = self.controller.detail(req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
expected = {'hypervisors': [self.DETAIL_HYPERS_DICTS[0]]}
@@ -1483,15 +1457,11 @@ class HypervisorsTestV288(HypervisorsTestV275):
self.controller.uptime, req)
def test_uptime_old_version(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- return_value='fake uptime',
- ):
- req = self._get_request(use_admin_context=True, version='2.87')
- hyper_id = self._get_hyper_id()
+ req = self._get_request(use_admin_context=True, version='2.87')
+ hyper_id = self._get_hyper_id()
- # no exception == pass
- self.controller.uptime(req, hyper_id)
+ # no exception == pass
+ self.controller.uptime(req, hyper_id)
def test_uptime_noid(self):
# the separate 'uptime' API has been removed, so skip this test
@@ -1526,34 +1496,36 @@ class HypervisorsTestV288(HypervisorsTestV275):
pass
def test_show_with_uptime_notimplemented(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=NotImplementedError,
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ NotImplementedError())
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1, self.controller.host_api.get_host_uptime.call_count)
def test_show_with_uptime_hypervisor_down(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=exception.ComputeServiceUnavailable(host='dummy')
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ exception.ComputeServiceUnavailable(host='dummy'))
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1,
+ self.controller.host_api.get_host_uptime.call_count
+ )
def test_show_old_version(self):
# ensure things still work as expected here
diff --git a/nova/tests/unit/api/openstack/compute/test_image_metadata.py b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
index 2e1c26a712..4072d6f489 100644
--- a/nova/tests/unit/api/openstack/compute/test_image_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_images.py b/nova/tests/unit/api/openstack/compute/test_images.py
index fad4fcb5a2..734e755dd5 100644
--- a/nova/tests/unit/api/openstack/compute/test_images.py
+++ b/nova/tests/unit/api/openstack/compute/test_images.py
@@ -19,9 +19,9 @@ and as a WSGI layer
"""
import copy
+from unittest import mock
from urllib import parse as urlparse
-import mock
import webob
from nova.api.openstack.compute import images as images_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_instance_actions.py b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
index 04e9ae443e..df13e1d89d 100644
--- a/nova/tests/unit/api/openstack/compute/test_instance_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
@@ -15,9 +15,9 @@
import copy
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_policy import policy as oslo_policy
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_keypairs.py b/nova/tests/unit/api/openstack/compute/test_keypairs.py
index 30e95fb21d..590639d5ed 100644
--- a/nova/tests/unit/api/openstack/compute/test_keypairs.py
+++ b/nova/tests/unit/api/openstack/compute/test_keypairs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
@@ -37,6 +38,8 @@ keypair_data = {
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
+keypair_name_2_92_compatible = 'my-key@ my.host'
+
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
@@ -110,16 +113,22 @@ class KeypairsTestV21(test.TestCase):
self.assertGreater(len(res_dict['keypair']['private_key']), 0)
self._assert_keypair_type(res_dict)
- def _test_keypair_create_bad_request_case(self,
- body,
- exception):
- self.assertRaises(exception,
- self.controller.create, self.req, body=body)
+ def _test_keypair_create_bad_request_case(
+ self, body, exception, error_msg=None
+ ):
+ if error_msg:
+ self.assertRaisesRegex(exception, error_msg,
+ self.controller.create,
+ self.req, body=body)
+ else:
+ self.assertRaises(exception,
+ self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ 'is too short')
def test_keypair_create_with_name_too_long(self):
body = {
@@ -128,7 +137,8 @@ class KeypairsTestV21(test.TestCase):
}
}
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ 'is too long')
def test_keypair_create_with_name_leading_trailing_spaces(self):
body = {
@@ -136,8 +146,10 @@ class KeypairsTestV21(test.TestCase):
'name': ' test '
}
}
+ expected_msg = 'Can not start or end with whitespace.'
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ expected_msg)
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
@@ -152,8 +164,21 @@ class KeypairsTestV21(test.TestCase):
'name': 'test/keypair'
}
}
+ expected_msg = 'Only expected characters'
self._test_keypair_create_bad_request_case(body,
- webob.exc.HTTPBadRequest)
+ self.validation_error,
+ expected_msg)
+
+ def test_keypair_create_with_special_characters(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible
+ }
+ }
+ expected_msg = 'Only expected characters'
+ self._test_keypair_create_bad_request_case(body,
+ self.validation_error,
+ expected_msg)
def test_keypair_import_bad_key(self):
body = {
@@ -167,8 +192,10 @@ class KeypairsTestV21(test.TestCase):
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
+ expected_msg = "'keypair' is a required property"
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ expected_msg)
def test_keypair_import(self):
body = {
@@ -470,3 +497,82 @@ class KeypairsTestV275(test.TestCase):
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1)
+
+
+class KeypairsTestV292(test.TestCase):
+ wsgi_api_version = '2.92'
+ wsgi_old_api_version = '2.91'
+
+ def setUp(self):
+ super(KeypairsTestV292, self).setUp()
+ self.controller = keypairs_v21.KeypairController()
+ self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ self.old_req = fakes.HTTPRequest.blank(
+ '', version=self.wsgi_old_api_version)
+
+ def test_keypair_create_no_longer_supported(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ }
+ }
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=body)
+
+ def test_keypair_create_works_with_old_version(self):
+ body = {
+ 'keypair': {
+ 'name': 'fake',
+ }
+ }
+ res_dict = self.controller.create(self.old_req, body=body)
+ self.assertEqual('fake', res_dict['keypair']['name'])
+ self.assertGreater(len(res_dict['keypair']['private_key']), 0)
+
+ def test_keypair_import_works_with_new_version(self):
+ body = {
+ 'keypair': {
+ 'name': 'fake',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ }
+ }
+ res_dict = self.controller.create(self.req, body=body)
+ self.assertEqual('fake', res_dict['keypair']['name'])
+ self.assertNotIn('private_key', res_dict['keypair'])
+
+ def test_keypair_create_refuses_special_chars_with_old_version(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ }
+ }
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.old_req, body=body)
+
+ def test_keypair_import_with_special_characters(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ }
+ }
+
+ res_dict = self.controller.create(self.req, body=body)
+ self.assertEqual(keypair_name_2_92_compatible,
+ res_dict['keypair']['name'])
diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py
index 2c245dc4a8..1748023aa8 100644
--- a/nova/tests/unit/api/openstack/compute/test_limits.py
+++ b/nova/tests/unit/api/openstack/compute/test_limits.py
@@ -19,8 +19,8 @@ Tests dealing with HTTP rate-limiting.
from http import client as httplib
from io import StringIO
+from unittest import mock
-import mock
from oslo_limit import fixture as limit_fixture
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
@@ -34,7 +34,6 @@ from nova.limit import local as local_limit
from nova.limit import placement as placement_limit
from nova import objects
from nova.policies import limits as l_policies
-from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
@@ -52,12 +51,12 @@ class BaseLimitTestSuite(test.NoDBTestCase):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- mock_get_project_quotas = mock.patch.object(
+ patcher_get_project_quotas = mock.patch.object(
nova.quota.QUOTAS,
"get_project_quotas",
- side_effect = stub_get_project_quotas)
- mock_get_project_quotas.start()
- self.addCleanup(mock_get_project_quotas.stop)
+ side_effect=stub_get_project_quotas)
+ self.mock_get_project_quotas = patcher_get_project_quotas.start()
+ self.addCleanup(patcher_get_project_quotas.stop)
patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
self.mock_can = patcher.start()
self.addCleanup(patcher.stop)
@@ -154,16 +153,14 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
- response = request.get_response(self.controller)
+ response = request.get_response(self.controller)
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
- get_project_quotas.assert_called_once_with(context, tenant_id,
- usages=True)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, tenant_id, usages=True)
def _do_test_used_limits(self, reserved):
request = self._get_index_request(tenant_id=None)
@@ -186,8 +183,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return limits
- self.stub_out('nova.quota.QUOTAS.get_project_quotas',
- stub_get_project_quotas)
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
res = request.get_response(self.controller)
body = jsonutils.loads(res.body)
@@ -211,14 +207,15 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
user_id=user_id,
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- self.assertEqual(2, self.mock_can.call_count)
- self.mock_can.assert_called_with(
- l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
- mock_get_quotas.assert_called_once_with(context,
- tenant_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.assertEqual(2, self.mock_can.call_count)
+ self.mock_can.assert_called_with(
+ l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
+ self.mock_get_project_quotas.assert_called_once_with(context,
+ tenant_id, usages=True)
def _test_admin_can_fetch_used_limits_for_own_project(self, req_get):
project_id = "123456"
@@ -230,11 +227,12 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_admin_can_fetch_used_limits_for_own_project(self):
req_get = {}
@@ -262,12 +260,13 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id = "123456"
fake_req = self._get_index_request(project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ fake_req.get_response(self.controller)
+
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_used_ram_added(self):
fake_req = self._get_index_request()
@@ -275,28 +274,26 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return {'ram': {'limit': 512, 'in_use': 256}}
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- side_effect=stub_get_project_quotas
- ) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertIn('totalRAMUsed', abs_limits)
- self.assertEqual(256, abs_limits['totalRAMUsed'])
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertIn('totalRAMUsed', abs_limits)
+ self.assertEqual(256, abs_limits['totalRAMUsed'])
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
def test_no_ram_quota(self):
fake_req = self._get_index_request()
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertNotIn('totalRAMUsed', abs_limits)
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertNotIn('totalRAMUsed', abs_limits)
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
class FakeHttplibSocket(object):
@@ -398,25 +395,24 @@ class LimitsControllerTestV236(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxTotalRAMSize": 512,
- "maxTotalInstances": 5,
- "maxTotalCores": 21,
- "maxTotalKeypairs": 10,
- "totalRAMUsed": 256,
- "totalCoresUsed": 10,
- "totalInstancesUsed": 2,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ "maxTotalKeypairs": 10,
+ "totalRAMUsed": 256,
+ "totalCoresUsed": 10,
+ "totalInstancesUsed": 2,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV239(BaseLimitTestSuite):
@@ -436,21 +432,20 @@ class LimitsControllerTestV239(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- # staring from version 2.39 there is no 'maxImageMeta' field
- # in response after removing 'image-metadata' proxy API
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxServerMeta": 1,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ # starting from version 2.39 there is no 'maxImageMeta' field
+ # in response after removing 'image-metadata' proxy API
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 1,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV275(BaseLimitTestSuite):
@@ -469,10 +464,8 @@ class LimitsControllerTestV275(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- self.controller.index(req)
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+ self.controller.index(req)
def test_index_additional_query_param(self):
req = fakes.HTTPRequest.blank("/?unknown=fake",
diff --git a/nova/tests/unit/api/openstack/compute/test_lock_server.py b/nova/tests/unit/api/openstack/compute/test_lock_server.py
index 3903ec7945..bf49bf2b73 100644
--- a/nova/tests/unit/api/openstack/compute/test_lock_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_lock_server.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack import api_version_request
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_microversions.py b/nova/tests/unit/api/openstack/compute/test_microversions.py
index c5b1ddb5e5..9f5dd90889 100644
--- a/nova/tests/unit/api/openstack/compute/test_microversions.py
+++ b/nova/tests/unit/api/openstack/compute/test_microversions.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index 683759eccc..8d1c853206 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
@@ -530,9 +531,8 @@ class MigrateServerTestsV256(MigrateServerTestsV234):
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_exception(self, exc_info, expected_result):
- @mock.patch.object(self.compute_api, 'get')
@mock.patch.object(self.compute_api, 'resize', side_effect=exc_info)
- def _test(mock_resize, mock_get):
+ def _test(mock_resize):
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(expected_result,
self.controller._migrate,
diff --git a/nova/tests/unit/api/openstack/compute/test_migrations.py b/nova/tests/unit/api/openstack/compute/test_migrations.py
index a06d395bea..19bc42a9de 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrations.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_multinic.py b/nova/tests/unit/api/openstack/compute/test_multinic.py
index ceaaebf373..17a872fed2 100644
--- a/nova/tests/unit/api/openstack/compute/test_multinic.py
+++ b/nova/tests/unit/api/openstack/compute/test_multinic.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import webob
from nova.api.openstack.compute import multinic as multinic_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_quota_classes.py b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
index d501412803..463f8344c0 100644
--- a/nova/tests/unit/api/openstack/compute/test_quota_classes.py
+++ b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
@@ -12,8 +12,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import copy
-import mock
+from unittest import mock
+
from oslo_limit import fixture as limit_fixture
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py
index 6cb8d9c7ad..0a1bbd08d8 100644
--- a/nova/tests/unit/api/openstack/compute/test_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_quotas.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
import webob
@@ -882,7 +883,8 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
local_limit.KEY_PAIRS: 100,
local_limit.SERVER_GROUPS: 12,
local_limit.SERVER_GROUP_MEMBERS: 10}
- self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture(reglimits, {}))
@mock.patch.object(placement_limit, "get_legacy_project_limits")
def test_show_v21(self, mock_proj):
@@ -1098,7 +1100,7 @@ class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
local_limit.KEY_PAIRS: 1,
local_limit.SERVER_GROUPS: 3,
local_limit.SERVER_GROUP_MEMBERS: 2}
- self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ self.limit_fixture.reglimits = reglimits
req = fakes.HTTPRequest.blank("")
response = self.controller.defaults(req, uuids.project_id)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index 8d1dfa9a40..bd09307567 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack import api_version_request
diff --git a/nova/tests/unit/api/openstack/compute/test_rescue.py b/nova/tests/unit/api/openstack/compute/test_rescue.py
index 28b8217d1a..8a87f52222 100644
--- a/nova/tests/unit/api/openstack/compute/test_rescue.py
+++ b/nova/tests/unit/api/openstack/compute/test_rescue.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import ddt
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_security_groups.py b/nova/tests/unit/api/openstack/compute/test_security_groups.py
index 71cdcbc871..4a85a9997d 100644
--- a/nova/tests/unit/api/openstack/compute/test_security_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_security_groups.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
index d07924abe8..08f7a31573 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -13,9 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
@@ -66,11 +67,11 @@ class ServerActionsControllerTestV21(test.TestCase):
self.controller = self._get_controller()
self.compute_api = self.controller.compute_api
- # We don't care about anything getting as far as hitting the compute
- # RPC API so we just mock it out here.
- mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
- mock_rpcapi.start()
- self.addCleanup(mock_rpcapi.stop)
+ # In most of the cases we don't care about anything getting as far as
+ # hitting the compute RPC API so we just mock it out here.
+ patcher_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
+ self.mock_rpcapi = patcher_rpcapi.start()
+ self.addCleanup(patcher_rpcapi.stop)
# The project_id here matches what is used by default in
# fake_compute_get which need to match for policy checks.
self.req = fakes.HTTPRequest.blank('',
@@ -1079,21 +1080,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
if mock_vol_create_side_effect:
mock_vol_create.side_effect = mock_vol_create_side_effect
@@ -1125,7 +1128,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
@@ -1189,21 +1192,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
response = self.controller._action_create_image(self.req,
FAKE_UUID, body=body)
@@ -1218,7 +1223,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py b/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
index d215f3e903..12d8bbb318 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/test_server_external_events.py
index 647f468f37..e366d0acdd 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_external_events.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_external_events.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures as fx
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_external_events \
diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
index a0404baffc..fe7a60f956 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@@ -209,7 +210,8 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota')
self.req = fakes.HTTPRequest.blank('')
self.controller = sg_v21.ServerGroupController()
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 10}, {}))
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture({'server_groups': 10}, {}))
@mock.patch('nova.limit.local.enforce_db_limit')
def test_create_server_group_during_recheck(self, mock_enforce):
@@ -236,7 +238,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
delta=1)
def test_create_group_fails_with_zero_quota(self):
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 0}, {}))
+ self.limit_fixture.reglimits = {'server_groups': 0}
sgroup = {'name': 'test', 'policies': ['anti-affinity']}
exc = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
@@ -245,7 +247,7 @@ class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
self.assertIn(msg, str(exc))
def test_create_only_one_group_when_limit_is_one(self):
- self.useFixture(limit_fixture.LimitFixture({'server_groups': 1}, {}))
+ self.limit_fixture.reglimits = {'server_groups': 1}
policies = ['anti-affinity']
sgroup = {'name': 'test', 'policies': policies}
res_dict = self.controller.create(
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index a0d1712343..636682a6b7 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -14,7 +14,8 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
index a454597305..9b420dde17 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_server_migrations.py b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
index 8d798d434c..c5d8556751 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
@@ -15,8 +15,8 @@
import copy
import datetime
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_password.py b/nova/tests/unit/api/openstack/compute/test_server_password.py
index e34ceb90e9..2751eee709 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_password.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_password.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import server_password \
as server_password_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
index 3462cf21ac..3a0c9ca1e2 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
index 60d12d0c43..f604652622 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_tags.py b/nova/tests/unit/api/openstack/compute/test_server_tags.py
index b121c75c3a..4e4609d778 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_tags.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_tags.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from nova.api.openstack.compute import server_tags
diff --git a/nova/tests/unit/api/openstack/compute/test_server_topology.py b/nova/tests/unit/api/openstack/compute/test_server_topology.py
index 3d8f6dc908..63d5f7a5c1 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_topology.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_topology.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
index 63115659af..8903de0c3c 100644
--- a/nova/tests/unit/api/openstack/compute/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -17,13 +17,14 @@
import collections
import copy
import datetime
+from unittest import mock
+
import ddt
import functools
from urllib import parse as urlparse
import fixtures
import iso8601
-import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import base64
from oslo_serialization import jsonutils
@@ -2087,10 +2088,10 @@ class ServersControllerTestV216(_ServersControllerTest):
return server_dict
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def _verify_host_status_policy_behavior(self, func, mock_get_host_status):
+ def _verify_host_status_policy_behavior(self, func):
# Set policy to disallow both host_status cases and verify we don't
# call the get_instance_host_status compute RPC API.
+ self.mock_get_instance_host_status.reset_mock()
rules = {
'os_compute_api:servers:show:host_status': '!',
'os_compute_api:servers:show:host_status:unknown-only': '!',
@@ -2098,7 +2099,7 @@ class ServersControllerTestV216(_ServersControllerTest):
orig_rules = policy.get_rules()
policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False)
func()
- mock_get_host_status.assert_not_called()
+ self.mock_get_instance_host_status.assert_not_called()
# Restore the original rules.
policy.set_rules(orig_rules)
@@ -2638,15 +2639,13 @@ class ServersControllerTestV275(ControllerTest):
microversion = '2.75'
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_additional_query_param_old_version(self, mock_get):
+ def test_get_servers_additional_query_param_old_version(self):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
self.controller.index(req)
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_ignore_sort_key_old_version(self, mock_get):
+ def test_get_servers_ignore_sort_key_old_version(self):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=deleted',
use_admin_context=True, version='2.74')
@@ -3584,13 +3583,13 @@ class ServersControllerRebuildTestV263(ControllerTest):
},
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get, certs=None,
- conf_enabled=True, conf_certs=None):
+ def _rebuild_server(self, certs=None, conf_enabled=True, conf_certs=None):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
- vm_state=vm_states.ACTIVE, trusted_certs=certs,
- project_id=self.req_project_id, user_id=self.req_user_id)
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(
+ ctx, vm_state=vm_states.ACTIVE, trusted_certs=certs,
+ project_id=self.req_project_id, user_id=self.req_user_id
+ )
self.flags(default_trusted_certificate_ids=conf_certs, group='glance')
@@ -3743,10 +3742,10 @@ class ServersControllerRebuildTestV271(ControllerTest):
}
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get):
+ def _rebuild_server(self):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, project_id=self.req_project_id,
user_id=self.req_user_id)
server = self.controller._action_rebuild(
diff --git a/nova/tests/unit/api/openstack/compute/test_services.py b/nova/tests/unit/api/openstack/compute/test_services.py
index 5d83bc5a91..f237acc15a 100644
--- a/nova/tests/unit/api/openstack/compute/test_services.py
+++ b/nova/tests/unit/api/openstack/compute/test_services.py
@@ -14,9 +14,9 @@
import copy
import datetime
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
import webob.exc
diff --git a/nova/tests/unit/api/openstack/compute/test_shelve.py b/nova/tests/unit/api/openstack/compute/test_shelve.py
index 68e523be47..bfa8d2d055 100644
--- a/nova/tests/unit/api/openstack/compute/test_shelve.py
+++ b/nova/tests/unit/api/openstack/compute/test_shelve.py
@@ -12,10 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
import ddt
+import fixtures
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import webob
@@ -134,13 +134,17 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
'availability_zone': 'us-east'
}}
self.req.body = jsonutils.dump_as_bytes(body)
- self.req.api_version_request = (api_version_request.
- APIVersionRequest('2.76'))
- with mock.patch.object(self.controller.compute_api,
- 'unshelve') as mock_unshelve:
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.76')
+ )
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve'
+ ) as mock_unshelve:
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
- self.req.environ['nova.context'], instance, new_az=None)
+ self.req.environ['nova.context'],
+ instance,
+ )
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -158,7 +162,9 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
APIVersionRequest('2.76'))
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
- self.req.environ['nova.context'], instance, new_az=None)
+ self.req.environ['nova.context'],
+ instance,
+ )
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -193,6 +199,238 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
'availability_zone': None
}}
self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID, body=body)
+
+ def test_unshelve_with_additional_param(self):
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ 'additional_param': 1
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ exc = self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve, self.req,
+ fakes.FAKE_UUID, body=body)
+ self.assertIn("Additional properties are not allowed", str(exc))
+
+
+class UnshelveServerControllerTestV291(test.NoDBTestCase):
+ """Server controller test for microversion 2.91
+
+ Add host parameter to unshelve a shelved-offloaded server of
+ 2.91 microversion.
+ """
+ wsgi_api_version = '2.91'
+
+ def setUp(self):
+ super(UnshelveServerControllerTestV291, self).setUp()
+ self.controller = shelve_v21.ShelveController()
+ self.req = fakes.HTTPRequest.blank(
+ '/%s/servers/a/action' % fakes.FAKE_PROJECT_ID,
+ use_admin_context=True, version=self.wsgi_api_version)
+
+ def fake_get_instance(self):
+ ctxt = self.req.environ['nova.context']
+ return fake_instance.fake_instance_obj(
+ ctxt, uuid=fakes.FAKE_UUID, vm_state=vm_states.SHELVED_OFFLOADED)
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_pre_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ before microversion 2.91
+ is still working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.77'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve'
+ ) as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_without_parameters_2_91(self, mock_get_instance):
+ """Make sure not specifying parameters with microversion 2.91
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': None
+ }
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ with microversion 2.91
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ host=None,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_none_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ to none (unpin server)
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': None,
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az=None,
+ host=None,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_host_2_91(self, mock_get_instance):
+ """Make sure specifying a host with microversion 2.91
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'host': 'server02',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ host='server02',
+ )
+
+ @mock.patch('nova.compute.api.API.unshelve')
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_and_host_with_v2_91(
+ self, mock_get_instance, mock_unshelve):
+ """Make sure specifying a host and an availability_zone with
+ microversion 2.91 is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ 'host': 'server01',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ self.controller.compute_api.unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ host='server01',
+ )
+
+    def test_invalid_az_name_with_int(self):
+        body = {
+            'unshelve': {
+                'availability_zone': 1234
+            }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID,
+ body=body)
+
+ def test_no_az_value(self):
+ body = {
+ 'unshelve': {
+ 'host': None
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID, body=body)
+
+ def test_invalid_host_fqdn_with_int(self):
+ body = {
+ 'unshelve': {
+ 'host': 1234
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID,
+ body=body)
+
+ def test_no_host(self):
+ body = {
+ 'unshelve': {
+ 'host': None
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError,
self.controller._unshelve,
self.req, fakes.FAKE_UUID,
@@ -201,7 +439,7 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
def test_unshelve_with_additional_param(self):
body = {
'unshelve': {
- 'availability_zone': 'us-east',
+ 'host': 'server01',
'additional_param': 1
}}
self.req.body = jsonutils.dump_as_bytes(body)
@@ -209,4 +447,4 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
exception.ValidationError,
self.controller._unshelve, self.req,
fakes.FAKE_UUID, body=body)
- self.assertIn("Additional properties are not allowed", str(exc))
+ self.assertIn("Invalid input for field/attribute unshelve.", str(exc))
diff --git a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
index 5794fdf061..a7dcfae558 100644
--- a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
+++ b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_snapshots.py b/nova/tests/unit/api/openstack/compute/test_snapshots.py
index b23ed50865..2e133506a3 100644
--- a/nova/tests/unit/api/openstack/compute/test_snapshots.py
+++ b/nova/tests/unit/api/openstack/compute/test_snapshots.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import volumes as volumes_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_suspend_server.py b/nova/tests/unit/api/openstack/compute/test_suspend_server.py
index 6eeb2b4549..a44297362c 100644
--- a/nova/tests/unit/api/openstack/compute/test_suspend_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_suspend_server.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_tenant_networks.py b/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
index d05c85c508..c6de561b11 100644
--- a/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
+++ b/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index a24c104c93..5b4a2d8b1a 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -15,10 +15,10 @@
# under the License.
import datetime
+from unittest import mock
import urllib
import fixtures
-import mock
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -1889,8 +1889,7 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
req, '5')
def _test_assisted_delete_instance_conflict(self, api_error):
- # unset the stub on volume_snapshot_delete from setUp
- self.mock_volume_snapshot_delete.stop()
+ self.mock_volume_snapshot_delete.side_effect = api_error
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
@@ -1899,10 +1898,9 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
urllib.parse.urlencode(params),
version=self.microversion)
req.method = 'DELETE'
- with mock.patch.object(compute_api.API, 'volume_snapshot_delete',
- side_effect=api_error):
- self.assertRaises(
- webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
+
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
def test_assisted_delete_instance_invalid_state(self):
api_error = exception.InstanceInvalidState(
diff --git a/nova/tests/unit/api/openstack/test_common.py b/nova/tests/unit/api/openstack/test_common.py
index 4666413e27..7fe98bd52e 100644
--- a/nova/tests/unit/api/openstack/test_common.py
+++ b/nova/tests/unit/api/openstack/test_common.py
@@ -17,7 +17,8 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
-import mock
+from unittest import mock
+
from testtools import matchers
import webob
import webob.exc
diff --git a/nova/tests/unit/api/openstack/test_faults.py b/nova/tests/unit/api/openstack/test_faults.py
index 1bd56a87c5..c7dd5c0a9d 100644
--- a/nova/tests/unit/api/openstack/test_faults.py
+++ b/nova/tests/unit/api/openstack/test_faults.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
import webob
import webob.dec
diff --git a/nova/tests/unit/api/openstack/test_requestlog.py b/nova/tests/unit/api/openstack/test_requestlog.py
index 0ea91439cc..7e79e1b079 100644
--- a/nova/tests/unit/api/openstack/test_requestlog.py
+++ b/nova/tests/unit/api/openstack/test_requestlog.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import fixtures as fx
import testtools
diff --git a/nova/tests/unit/api/openstack/test_wsgi.py b/nova/tests/unit/api/openstack/test_wsgi.py
index e0cf8f6fd8..76554e1fcb 100644
--- a/nova/tests/unit/api/openstack/test_wsgi.py
+++ b/nova/tests/unit/api/openstack/test_wsgi.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
import testscenarios
import webob
diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py
index 247886b9dd..0eb7011c11 100644
--- a/nova/tests/unit/api/openstack/test_wsgi_app.py
+++ b/nova/tests/unit/api/openstack/test_wsgi_app.py
@@ -11,9 +11,9 @@
# under the License.
import tempfile
+from unittest import mock
import fixtures
-import mock
from oslo_config import fixture as config_fixture
from oslotest import base
@@ -104,3 +104,18 @@ document_root = /tmp
'disable_compute_service_check_for_ffu', True,
group='workarounds')
wsgi_app._setup_service('myhost', 'api')
+
+ def test__get_config_files_empty_env(self):
+ env = {}
+ result = wsgi_app._get_config_files(env)
+ expected = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
+ self.assertEqual(result, expected)
+
+ def test__get_config_files_with_env(self):
+ env = {
+ "OS_NOVA_CONFIG_DIR": "/nova",
+ "OS_NOVA_CONFIG_FILES": "api.conf",
+ }
+ result = wsgi_app._get_config_files(env)
+ expected = ['/nova/api.conf']
+ self.assertEqual(result, expected)
diff --git a/nova/tests/unit/api/test_auth.py b/nova/tests/unit/api/test_auth.py
index 3be245b90e..3bc5f51b04 100644
--- a/nova/tests/unit/api/test_auth.py
+++ b/nova/tests/unit/api/test_auth.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_middleware import request_id
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/test_wsgi.py b/nova/tests/unit/api/test_wsgi.py
index b2701dc723..b8f215c730 100644
--- a/nova/tests/unit/api/test_wsgi.py
+++ b/nova/tests/unit/api/test_wsgi.py
@@ -20,8 +20,8 @@ Test WSGI basics and provide some helper functions for other WSGI tests.
"""
import sys
+from unittest import mock
-import mock
import routes
import webob
diff --git a/nova/tests/unit/api/validation/extra_specs/test_validators.py b/nova/tests/unit/api/validation/extra_specs/test_validators.py
index 969fb9b648..a8911aadad 100644
--- a/nova/tests/unit/api/validation/extra_specs/test_validators.py
+++ b/nova/tests/unit/api/validation/extra_specs/test_validators.py
@@ -28,7 +28,7 @@ class TestValidators(test.NoDBTestCase):
"""
namespaces = {
'accel', 'aggregate_instance_extra_specs', 'capabilities', 'hw',
- 'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'powervm', 'quota',
+ 'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'quota',
'resources(?P<group>([a-zA-Z0-9_-]{1,64})?)',
'trait(?P<group>([a-zA-Z0-9_-]{1,64})?)', 'vmware',
}
@@ -74,6 +74,10 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'preferred'),
('hw:pci_numa_affinity_policy', 'socket'),
('hw:cpu_policy', 'mixed'),
+ ('hw:viommu_model', 'auto'),
+ ('hw:viommu_model', 'intel'),
+ ('hw:viommu_model', 'smmuv3'),
+ ('hw:viommu_model', 'virtio'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -92,6 +96,7 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'requird'),
('hw:pci_numa_affinity_policy', 'prefrred'),
('hw:pci_numa_affinity_policy', 'socet'),
+ ('hw:viommu_model', 'autt'),
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
@@ -101,9 +106,7 @@ class TestValidators(test.NoDBTestCase):
valid_specs = (
('hw:numa_nodes', '1'),
('os:monitors', '1'),
- ('powervm:shared_weight', '1'),
('os:monitors', '8'),
- ('powervm:shared_weight', '255'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -113,9 +116,7 @@ class TestValidators(test.NoDBTestCase):
('hw:serial_port_count', '!'), # NaN
('hw:numa_nodes', '0'), # has min
('os:monitors', '0'), # has min
- ('powervm:shared_weight', '-1'), # has min
('os:monitors', '9'), # has max
- ('powervm:shared_weight', '256'), # has max
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
diff --git a/nova/tests/unit/cmd/test_baseproxy.py b/nova/tests/unit/cmd/test_baseproxy.py
index 34f911cd83..25f3905f24 100644
--- a/nova/tests/unit/cmd/test_baseproxy.py
+++ b/nova/tests/unit/cmd/test_baseproxy.py
@@ -13,9 +13,9 @@
# limitations under the License.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
diff --git a/nova/tests/unit/cmd/test_common.py b/nova/tests/unit/cmd/test_common.py
index cabb54f9d4..a32073c297 100644
--- a/nova/tests/unit/cmd/test_common.py
+++ b/nova/tests/unit/cmd/test_common.py
@@ -19,9 +19,9 @@
from io import StringIO
import sys
+from unittest import mock
import fixtures
-import mock
from nova.cmd import common as cmd_common
from nova import exception
diff --git a/nova/tests/unit/cmd/test_compute.py b/nova/tests/unit/cmd/test_compute.py
index acfcea50d2..e465b026aa 100644
--- a/nova/tests/unit/cmd/test_compute.py
+++ b/nova/tests/unit/cmd/test_compute.py
@@ -13,8 +13,8 @@
# limitations under the License.
import contextlib
+from unittest import mock
-import mock
from nova.cmd import compute
from nova import context
diff --git a/nova/tests/unit/cmd/test_manage.py b/nova/tests/unit/cmd/test_manage.py
index 5be0f8edac..10c1a77c94 100644
--- a/nova/tests/unit/cmd/test_manage.py
+++ b/nova/tests/unit/cmd/test_manage.py
@@ -17,11 +17,11 @@ import datetime
from io import StringIO
import sys
import textwrap
+from unittest import mock
import warnings
import ddt
import fixtures
-import mock
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/cmd/test_nova_api.py b/nova/tests/unit/cmd/test_nova_api.py
index f13712eabd..a4f7d82105 100644
--- a/nova/tests/unit/cmd/test_nova_api.py
+++ b/nova/tests/unit/cmd/test_nova_api.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.cmd import api
from nova import config
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index 20cc87da13..df51665959 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -18,9 +18,9 @@
"""
from io import StringIO
+from unittest import mock
import fixtures
-import mock
from nova.cmd import policy
import nova.conf
@@ -129,7 +129,7 @@ class TestPolicyCheck(test.NoDBTestCase):
def test_filter_rules_non_admin(self):
context = nova_context.RequestContext()
- rule_conditions = [base_policies.PROJECT_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
@@ -156,7 +156,7 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- rule_conditions = [base_policies.PROJECT_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(db_context, instance, expected_rules)
diff --git a/nova/tests/unit/cmd/test_scheduler.py b/nova/tests/unit/cmd/test_scheduler.py
index e207c7343f..2927492abc 100644
--- a/nova/tests/unit/cmd/test_scheduler.py
+++ b/nova/tests/unit/cmd/test_scheduler.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.cmd import scheduler
from nova import config
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index b61da4ae9f..f5fcc168ee 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -21,10 +21,9 @@ Unit tests for the nova-status CLI interfaces.
# PlacementFixture, which is only available in functional tests.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
-
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as keystone
from keystoneauth1 import session
@@ -40,7 +39,6 @@ from nova import exception
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import service
-from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -394,60 +392,6 @@ class TestUpgradeCheckCinderAPI(test.NoDBTestCase):
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
-class TestUpgradeCheckPolicy(test.NoDBTestCase):
-
- new_default_status = upgradecheck.Code.WARNING
-
- def setUp(self):
- super(TestUpgradeCheckPolicy, self).setUp()
- self.cmd = status.UpgradeCommands()
- self.rule_name = "context_is_admin"
-
- def tearDown(self):
- super(TestUpgradeCheckPolicy, self).tearDown()
- # Check if policy is reset back after the upgrade check
- self.assertIsNone(policy._ENFORCER)
-
- def test_policy_rule_with_new_defaults(self):
- new_default = "role:admin and system_scope:all"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
- self.assertEqual(self.new_default_status,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_old_defaults(self):
- new_default = "is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_both_defaults(self):
- new_default = "(role:admin and system_scope:all) or is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_checks_with_fresh_init_and_no_policy_override(self):
- self.policy = self.useFixture(nova_fixtures.OverridePolicyFixture(
- rules_in_file={}))
- policy.reset()
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
-
-class TestUpgradeCheckPolicyEnableScope(TestUpgradeCheckPolicy):
-
- new_default_status = upgradecheck.Code.SUCCESS
-
- def setUp(self):
- super(TestUpgradeCheckPolicyEnableScope, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
-
class TestUpgradeCheckOldCompute(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py b/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
index aed34ea30c..a563a7e346 100644
--- a/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
+++ b/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
@@ -15,7 +15,7 @@
"""Tests for Compute Driver CPU resource monitor."""
-import mock
+from unittest import mock
from nova.compute.monitors.cpu import virt_driver
from nova import objects
diff --git a/nova/tests/unit/compute/monitors/test_monitors.py b/nova/tests/unit/compute/monitors/test_monitors.py
index 34b4a34d20..d43f90206c 100644
--- a/nova/tests/unit/compute/monitors/test_monitors.py
+++ b/nova/tests/unit/compute/monitors/test_monitors.py
@@ -15,7 +15,7 @@
"""Tests for resource monitors."""
-import mock
+from unittest import mock
from nova.compute import monitors
from nova import test
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index b2b01772b5..f17a767b99 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -15,11 +15,11 @@
import contextlib
import datetime
+from unittest import mock
import ddt
import fixtures
import iso8601
-import mock
import os_traits as ot
from oslo_limit import exception as limit_exceptions
from oslo_messaging import exceptions as oslo_exceptions
@@ -967,6 +967,31 @@ class _ComputeAPIUnitTestMixIn(object):
return snapshot_id
+ def _test_delete(self, delete_type, **attrs):
+ delete_time = datetime.datetime(
+ 1955, 11, 5, 9, 30, tzinfo=iso8601.UTC)
+ timeutils.set_time_override(delete_time)
+ self.addCleanup(timeutils.clear_time_override)
+
+ with test.nested(
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'confirm_resize'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'terminate_instance'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'soft_delete_instance'),
+ ) as (
+ mock_confirm, mock_terminate, mock_soft_delete
+ ):
+ self._do_delete(
+ delete_type,
+ mock_confirm,
+ mock_terminate,
+ mock_soft_delete,
+ delete_time,
+ **attrs
+ )
+
@mock.patch.object(compute_utils,
'notify_about_instance_action')
@mock.patch.object(objects.Migration, 'get_by_instance_and_status')
@@ -986,12 +1011,13 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=[])
@mock.patch.object(objects.Instance, 'save')
- def _test_delete(self, delete_type, mock_save, mock_bdm_get, mock_elevated,
- mock_get_cn, mock_up, mock_record, mock_inst_update,
- mock_deallocate, mock_inst_meta, mock_inst_destroy,
- mock_notify_legacy, mock_get_inst,
- mock_save_im, mock_image_delete, mock_mig_get,
- mock_notify, **attrs):
+ def _do_delete(
+ self, delete_type, mock_confirm, mock_terminate, mock_soft_delete,
+ delete_time, mock_save, mock_bdm_get, mock_elevated, mock_get_cn,
+ mock_up, mock_record, mock_inst_update, mock_deallocate,
+ mock_inst_meta, mock_inst_destroy, mock_notify_legacy, mock_get_inst,
+ mock_save_im, mock_image_delete, mock_mig_get, mock_notify, **attrs
+ ):
expected_save_calls = [mock.call()]
expected_record_calls = []
expected_elevated_calls = []
@@ -1001,17 +1027,11 @@ class _ComputeAPIUnitTestMixIn(object):
deltas = {'instances': -1,
'cores': -inst.flavor.vcpus,
'ram': -inst.flavor.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.UTC)
- self.useFixture(utils_fixture.TimeFixture(delete_time))
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
- rpcapi = self.compute_api.compute_rpcapi
- mock_confirm = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'confirm_resize')).mock
def _reset_task_state(context, instance, migration, src_host,
cast=False):
@@ -1026,11 +1046,6 @@ class _ComputeAPIUnitTestMixIn(object):
snapshot_id = self._set_delete_shelved_part(inst,
mock_image_delete)
- mock_terminate = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock
- mock_soft_delete = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'soft_delete_instance')).mock
-
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
@@ -1239,10 +1254,12 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.notify_about_instance_usage')
@mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.compute.api.API._record_action_start')
@mock.patch('nova.compute.api.API._local_delete')
def test_delete_error_state_with_no_host(
- self, mock_local_delete, mock_service_get, _mock_notify,
- _mock_save, mock_bdm_get, mock_lookup, _mock_del_booting):
+ self, mock_local_delete, mock_record, mock_service_get,
+ _mock_notify, _mock_save, mock_bdm_get, mock_lookup,
+ _mock_del_booting):
# Instance in error state with no host should be a local delete
# for non API cells
inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
@@ -1254,6 +1271,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_terminate.assert_not_called()
mock_service_get.assert_not_called()
@@ -2074,7 +2093,8 @@ class _ComputeAPIUnitTestMixIn(object):
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if request_spec:
- fake_spec = objects.RequestSpec()
+ fake_spec = objects.RequestSpec(
+ pci_requests=objects.InstancePCIRequests(requests=[]))
if requested_destination:
cell1 = objects.CellMapping(uuid=uuids.cell1, name='cell1')
fake_spec.requested_destination = objects.Destination(
@@ -2637,9 +2657,6 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi = self.compute_api.compute_rpcapi
- mock_pause = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock
-
with mock.patch.object(rpcapi, 'pause_instance') as mock_pause:
self.compute_api.pause(self.context, instance)
@@ -3479,7 +3496,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda',
'destination_type': 'volume', 'delete_on_termination': False,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': None, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
limits_patcher = mock.patch.object(
self.compute_api.volume_api, 'get_absolute_limits',
@@ -3542,7 +3561,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': None,
'device_name': '/dev/vdh',
'destination_type': 'local', 'delete_on_termination': True,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': False, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
quiesced = [False, False]
@@ -3987,6 +4008,155 @@ class _ComputeAPIUnitTestMixIn(object):
_checks_for_create_and_rebuild.assert_called_once_with(
self.context, None, image, flavor, {}, [], None)
+ @ddt.data(True, False)
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed(self, reimage_boot_vol,
+ _record_action_start, _checks_for_create_and_rebuild,
+ _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where the instance is volume backed and we rebuild
+        with the following cases:
+
+ 1) reimage_boot_volume=True
+ 2) reimage_boot_volume=False
+
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ flavor = instance.get_flavor()
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm), \
+ mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ if reimage_boot_vol:
+ self.compute_api.rebuild(self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=True)
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ image_ref=uuids.image_ref,
+ orig_image_ref=None, orig_sys_metadata={},
+ injected_files=[], bdms=bdms,
+ preserve_ephemeral=False, host=None,
+ request_spec=fake_spec,
+ reimage_boot_volume=True)
+ _check_auto_disk_config.assert_called_once_with(
+ image=image, auto_disk_config=None)
+ _checks_for_create_and_rebuild.assert_called_once_with(
+ self.context, None, image, flavor, {}, [], root_bdm)
+ mock_get_bdms.assert_called_once_with(
+ self.context, instance.uuid)
+ else:
+ self.assertRaises(
+ exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False)
+
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed_fails(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+        """Test a scenario where we don't pass parameters to rebuild
+        the boot volume
+        """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm):
+ self.assertRaises(exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False)
+
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@@ -4035,7 +4205,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4108,7 +4278,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4176,7 +4346,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4235,7 +4405,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4299,7 +4469,7 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -5913,6 +6083,41 @@ class _ComputeAPIUnitTestMixIn(object):
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
+ def test_update_ephemeral_encryption_bdms(self):
+ flavor = self._create_flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': True,
+ 'hw:ephemeral_encryption_format': 'luks',
+ }
+ )
+ block_device_mapping = [
+ {'device_name': '/dev/sda1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': uuids.snapshot_id,
+ 'delete_on_termination': False,
+ 'boot_index': 0},
+ {'device_name': '/dev/sdb2',
+ 'source_type': 'image', 'destination_type': 'local',
+ 'image_id': uuids.image_id, 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': 'ext3', 'delete_on_termination': False}]
+
+ block_device_mapping = (
+ block_device_obj.block_device_make_list_from_dicts(
+ self.context,
+ map(fake_block_device.AnonFakeDbBlockDeviceDict,
+ block_device_mapping)))
+
+ self.compute_api._update_ephemeral_encryption_bdms(
+ flavor, {}, block_device_mapping)
+
+ for bdm in block_device_mapping:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ else:
+ self.assertFalse(bdm.encrypted)
+
def test_bdm_validate_set_size_and_instance(self):
swap_size = 42
ephemeral_size = 24
@@ -7704,8 +7909,9 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.Instance, 'destroy')
+ @mock.patch('nova.compute.api.API._record_action_start')
def _test_delete_volume_backed_instance(
- self, vm_state, mock_instance_destroy, bdm_destroy,
+ self, vm_state, mock_record, mock_instance_destroy, bdm_destroy,
notify_about_instance_usage, mock_save, mock_elevated,
bdm_get_by_instance_uuid, mock_lookup, _mock_del_booting,
notify_about_instance_action):
@@ -7734,6 +7940,8 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
'detach') as mock_detach:
self.compute_api.delete(self.context, inst)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_deallocate.assert_called_once_with(self.context, inst)
mock_detach.assert_called_once_with(self.context, volume_id,
inst.uuid)
@@ -7751,16 +7959,13 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertTrue(hasattr(self.compute_api, 'host'))
self.assertEqual(CONF.host, self.compute_api.host)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per API class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.compute_api._placementclient)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.compute_api.placementclient
+ self.assertFalse(mock_report_client.called)
+ self.compute_api.placementclient
mock_report_client.assert_called_once_with()
def test_validate_host_for_cold_migrate_same_host_fails(self):
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 8997511e73..9ef3999441 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -15,9 +15,9 @@
"""Tests for resource tracker claims."""
+from unittest import mock
import uuid
-import mock
from nova.compute import claims
from nova import context
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 5be699ee81..314c29f583 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -22,10 +22,10 @@ import fixtures as std_fixtures
from itertools import chain
import operator
import sys
+from unittest import mock
from castellan import key_manager
import ddt
-import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -168,7 +168,7 @@ class BaseTestCase(test.TestCase):
'uuid': uuids.fake_compute_node,
'vcpus_used': 0,
'deleted': 0,
- 'hypervisor_type': 'powervm',
+ 'hypervisor_type': 'libvirt',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
@@ -178,7 +178,7 @@ class BaseTestCase(test.TestCase):
'current_workload': 0,
'vcpus': 16,
'mapped': 1,
- 'cpu_info': 'ppc64,powervm,3940',
+ 'cpu_info': 'ppc64,libvirt,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
@@ -1389,13 +1389,14 @@ class ComputeVolumeTestCase(BaseTestCase):
@mock.patch.object(nova.virt.block_device, 'convert_snapshots')
@mock.patch.object(nova.virt.block_device, 'convert_volumes')
@mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_local_images')
@mock.patch.object(nova.virt.block_device, 'convert_swap')
@mock.patch.object(nova.virt.block_device, 'attach_block_devices')
def test_prep_block_device_with_blanks(self, attach_block_devices,
- convert_swap, convert_ephemerals,
- convert_volumes, convert_snapshots,
- convert_images, convert_blanks,
- get_swap):
+ convert_swap, convert_local_images,
+ convert_ephemerals, convert_volumes,
+ convert_snapshots, convert_images,
+ convert_blanks, get_swap):
instance = self._create_fake_instance_obj()
instance['root_device_name'] = '/dev/vda'
root_volume = objects.BlockDeviceMapping(
@@ -1426,6 +1427,7 @@ class ComputeVolumeTestCase(BaseTestCase):
return bdm
convert_swap.return_value = []
+ convert_local_images.return_value = []
convert_ephemerals.return_value = []
convert_volumes.return_value = [blank_volume1, blank_volume2]
convert_snapshots.return_value = []
@@ -1438,6 +1440,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'root_device_name': '/dev/vda',
'swap': [],
'ephemerals': [],
+ 'image': [],
'block_device_mapping': bdms
}
@@ -1452,6 +1455,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertIsNotNone(bdm.device_name)
convert_swap.assert_called_once_with(bdms)
+ convert_local_images.assert_called_once_with(bdms)
convert_ephemerals.assert_called_once_with(bdms)
bdm_args = tuple(bdms)
convert_volumes.assert_called_once_with(bdm_args)
@@ -2726,7 +2730,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2756,7 +2761,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2808,7 +2814,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
- on_shared_storage=False, request_spec=None, accel_uuids=[])
+ on_shared_storage=False, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2827,7 +2834,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2848,7 +2855,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2881,7 +2888,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=injected_files, new_pass="new_password",
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -3212,6 +3220,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3240,6 +3249,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3273,7 +3283,11 @@ class ComputeTestCase(BaseTestCase,
'delete_on_termination': True,
'guest_format': None,
'volume_size': 2,
- 'boot_index': -1
+ 'boot_index': -1,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
})
swap = fake_block_device.FakeDbBlockDeviceDict({
'id': 3,
@@ -3308,16 +3322,25 @@ class ComputeTestCase(BaseTestCase,
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 1
+ 'size': 1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
},
{
'device_name': '/dev/vdc',
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 2
+ 'size': 2,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
}
],
+ 'image': [],
'block_device_mapping': [],
'root_device_name': None
}
@@ -4593,7 +4616,8 @@ class ComputeTestCase(BaseTestCase,
'limits': {},
'request_spec': None,
'on_shared_storage': False,
- 'accel_uuids': ()}),
+ 'accel_uuids': (),
+ 'reimage_boot_volume': False}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5111,7 +5135,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=[], new_pass=password,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False)
inst_ref.refresh()
@@ -6108,7 +6133,7 @@ class ComputeTestCase(BaseTestCase,
mock_pre.assert_called_once_with(
test.MatchType(nova.context.RequestContext),
test.MatchType(objects.Instance),
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
mock.ANY, mock.ANY, mock.ANY)
@@ -6474,7 +6499,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(2, mock_notify.call_count)
post_live_migration.assert_has_calls([
mock.call(c, instance, {'swap': None, 'ephemerals': [],
- 'root_device_name': None,
+ 'image': [], 'root_device_name': None,
'block_device_mapping': []},
migrate_data)])
migrate_instance_start.assert_has_calls([
@@ -6705,7 +6730,7 @@ class ComputeTestCase(BaseTestCase,
mock_setup.assert_called_once_with(c, instance, self.compute.host,
teardown=True)
mock_rollback.assert_called_once_with(c, instance, [],
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
destroy_disks=True, migrate_data=None)
@@ -8134,7 +8159,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
self.assertEqual('/dev/vda', instance.root_device_name)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.BlockDeviceMapping, 'save')
@@ -8148,7 +8173,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.Instance, 'save')
@@ -8170,7 +8195,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
mock_default_dev.assert_called_once_with(instance, mock.ANY, bdms[0])
mock_default_name.assert_called_once_with(instance, '/dev/vda', [], [],
- [bdm for bdm in bdms])
+ [], [bdm for bdm in bdms])
def test_default_block_device_names_with_blank_volumes(self):
instance = self._create_fake_instance_obj()
@@ -8230,7 +8255,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
self.assertTrue(object_save.called)
default_device_names.assert_called_once_with(instance,
- '/dev/vda', [bdms[-2]], [bdms[-1]],
+ '/dev/vda', [], [bdms[-2]], [bdms[-1]],
[bdm for bdm in bdms[:-2]])
def test_reserve_block_device_name(self):
@@ -8614,16 +8639,13 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href='f5000000-0000-0000-0000-000000000000')
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href='f5000000-0000-0000-0000-000000000000')
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
image_props = {'image_kernel_id': uuids.kernel_id,
'image_ramdisk_id': uuids.ramdisk_id,
@@ -8633,16 +8655,14 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(value, instance.system_metadata[key])
def test_create_saves_flavor(self):
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href=uuids.image_href_id)
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href=uuids.image_href_id)
+
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
self.assertIn('flavor', instance)
self.assertEqual(self.default_flavor.flavorid,
instance.flavor.flavorid)
@@ -8650,19 +8670,18 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
- with test.nested(
- mock.patch.object(self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch('nova.network.security_group_api.validate_name',
- return_value=uuids.secgroup_id),
- ) as (mock_sbi, mock_secgroups):
+ with mock.patch(
+ "nova.network.security_group_api.validate_name",
+ return_value=uuids.secgroup_id,
+ ) as mock_secgroups:
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
security_groups=['testgroup'])
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.security_groups))
@@ -8697,22 +8716,19 @@ class ComputeAPITestCase(BaseTestCase):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.port_instance)])
- with test.nested(
- mock.patch.object(
- self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch.object(
- self.compute_api.network_api,
- 'create_resource_requests',
- return_value=(None, [], objects.RequestLevelParams())),
- ) as (mock_sbi, _mock_create_resreqs):
+ with mock.patch.object(
+ self.compute_api.network_api,
+ "create_resource_requests",
+ return_value=(None, [], objects.RequestLevelParams()),
+ ):
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
requested_networks=requested_networks)
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.requested_networks))
@@ -10212,8 +10228,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_console_output,
self.context, instance)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_attach_interface(self, mock_notify):
+ def test_attach_interface(self):
instance = self._create_fake_instance_obj()
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
@@ -10233,8 +10248,12 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch.object(
self.compute,
"_claim_pci_device_for_interface_attach",
- return_value=None)
- ) as (cap, mock_lock, mock_create_resource_req, mock_claim_pci):
+ return_value=None),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
+ ) as (
+ cap, mock_lock, mock_create_resource_req, mock_claim_pci,
+ mock_notify
+ ):
mock_create_resource_req.return_value = (
None, [], mock.sentinel.req_lvl_params)
vif = self.compute.attach_interface(self.context,
@@ -10742,8 +10761,13 @@ class ComputeAPITestCase(BaseTestCase):
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
- mock.patch.object(self.compute.rt, 'claim_pci_devices',
- return_value=[]),
+ mock.patch.object(
+ self.compute.rt,
+ 'claim_pci_devices',
+ side_effect=exception.PciDeviceRequestFailed(
+ requests=instance.pci_requests
+ )
+ ),
mock.patch.object(
self.compute, '_allocate_port_resource_for_instance'),
mock.patch(
@@ -11052,8 +11076,7 @@ class ComputeAPITestCase(BaseTestCase):
mock_remove_res.assert_called_once_with(
self.context, instance.uuid, mock.sentinel.resources)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_detach_interface(self, mock_notify):
+ def test_detach_interface(self):
nwinfo, port_id = self.test_attach_interface()
instance = self._create_fake_instance_obj()
instance.info_cache = objects.InstanceInfoCache.new(
@@ -11086,10 +11109,13 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch('nova.pci.request.get_instance_pci_request_from_vif',
return_value=pci_req),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices'),
- mock.patch.object(instance, 'save')
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
) as (
- mock_remove_alloc, mock_deallocate, mock_lock,
- mock_get_pci_req, mock_unclaim_pci, mock_instance_save):
+ mock_remove_alloc, mock_deallocate, mock_lock,
+ mock_get_pci_req, mock_unclaim_pci, mock_instance_save,
+ mock_notify
+ ):
self.compute.detach_interface(self.context, instance, port_id)
mock_deallocate.assert_called_once_with(
@@ -11564,12 +11590,60 @@ class ComputeAPITestCase(BaseTestCase):
instance.uuid, None)
@mock.patch.object(context.RequestContext, 'elevated')
+ @mock.patch.object(cinder.API, 'detach')
+ @mock.patch.object(cinder.API, 'terminate_connection')
+ @mock.patch.object(compute_manager.ComputeManager,
+ '_get_instance_block_device_info')
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_legacy_volume_detach(
+ self, mock_get_connector, mock_info, mock_terminate, mock_detach,
+ mock_elevated,
+ ):
+ # test _shutdown_instance with legacy BDMs without a volume
+ # attachment ID
+ admin = context.get_admin_context()
+ mock_elevated.return_value = admin
+ instance = self._create_fake_instance_obj()
+ connector = 'fake-connector'
+ mock_get_connector.return_value = connector
+
+ vol_a_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_a_id,
+ attachment_id=None)
+ vol_b_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_b_id,
+ attachment_id=None)
+ bdms = [vol_a_bdm, vol_b_bdm]
+
+ self.compute._shutdown_instance(admin, instance, bdms)
+
+        # we should only get the connector once, regardless of the number of
+        # volumes
+ mock_get_connector.assert_called_once_with(instance)
+ # but we should have separate terminate and detach calls
+ mock_terminate.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, connector),
+ mock.call(admin, uuids.volume_b_id, connector),
+ ])
+ mock_detach.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, instance.uuid),
+ mock.call(admin, uuids.volume_b_id, instance.uuid),
+ ])
+
+ @mock.patch.object(context.RequestContext, 'elevated')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_delete(self, mock_info,
- mock_attach_delete,
- mock_elevated):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_delete(
+ self, mock_get_connector, mock_info, mock_attach_delete, mock_elevated,
+ ):
# test _shutdown_instance with volume bdm containing an
# attachment id. This should use the v3 cinder api.
admin = context.get_admin_context()
@@ -11589,14 +11663,18 @@ class ComputeAPITestCase(BaseTestCase):
self.compute._shutdown_instance(admin, instance, bdms)
mock_attach_delete.assert_called_once_with(admin, attachment_id)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
@mock.patch.object(compute_manager.LOG, 'debug')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_not_found(self, mock_info,
- mock_attach_delete,
- mock_debug_log):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_not_found(
+ self, mock_get_connector, mock_info, mock_attach_delete,
+ mock_debug_log,
+ ):
# test _shutdown_instance with attachment_delete throwing
# a VolumeAttachmentNotFound exception. This should not
# cause _shutdown_instance to fail. Only a debug log
@@ -11622,6 +11700,8 @@ class ComputeAPITestCase(BaseTestCase):
# get last call to LOG.debug and verify correct exception is in there
self.assertIsInstance(mock_debug_log.call_args[0][1],
exception.VolumeAttachmentNotFound)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
@@ -11896,17 +11976,16 @@ class ComputeAPITestCase(BaseTestCase):
instance.save()
@mock.patch.object(objects.Service, 'get_by_compute_host')
- @mock.patch.object(self.compute_api.compute_task_api,
- 'rebuild_instance')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec,
'get_by_instance_uuid')
@mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up')
- def do_test(service_is_up, get_by_instance_uuid, get_all_by_host,
- rebuild_instance, get_service):
+ def do_test(
+ service_is_up, get_by_instance_uuid, get_all_by_host, get_service
+ ):
service_is_up.return_value = False
get_by_instance_uuid.return_value = fake_spec
- rebuild_instance.side_effect = fake_rebuild_instance
+ self.rebuild_instance_mock.side_effect = fake_rebuild_instance
get_all_by_host.return_value = objects.ComputeNodeList(
objects=[objects.ComputeNode(
host='fake_dest_host',
@@ -11924,7 +12003,7 @@ class ComputeAPITestCase(BaseTestCase):
host = None
else:
host = 'fake_dest_host'
- rebuild_instance.assert_called_once_with(
+ self.rebuild_instance_mock.assert_called_once_with(
ctxt,
instance=instance,
new_pass=None,
@@ -13042,16 +13121,13 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = aggregate.hosts if 'hosts' in aggregate else None
self.assertIn(values[0][1][0], hosts)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per AggregateAPI class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.api._placement_client)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.api.placement_client
+ self.assertFalse(mock_report_client.called)
+ self.api.placement_client
mock_report_client.assert_called_once_with()
@@ -13464,7 +13540,7 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 645d82b4bf..e521283acc 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -17,6 +17,7 @@ import copy
import datetime
import fixtures as std_fixtures
import time
+from unittest import mock
from cinderclient import exceptions as cinder_exception
from cursive import exception as cursive_exception
@@ -24,7 +25,6 @@ import ddt
from eventlet import event as eventlet_event
from eventlet import timeout as eventlet_timeout
from keystoneauth1 import exceptions as keystone_exception
-import mock
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -348,6 +348,46 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, mock.sentinel.node, startup=True)
log_mock.exception.assert_called_once()
+ def test_update_available_resource_for_node_pci_placement_failed_startup(
+ self
+ ):
+ """If the PCI placement translation failed during startup then the
+ exception is raised up to kill the service
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.assertRaises(
+ exception.PlacementPciException,
+ self.compute._update_available_resource_for_node,
+ self.context,
+ mock.sentinel.node,
+ startup=True,
+ )
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=True)
+
+ @mock.patch('nova.compute.manager.LOG')
+ def test_update_available_resource_for_node_pci_placement_failed_later(
+ self, mock_log
+ ):
+        """If the PCI placement translation failed later (not at startup)
+        during a periodic task, then the exception is just logged
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.compute._update_available_resource_for_node(
+ self.context, mock.sentinel.node, startup=False)
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=False)
+ mock_log.exception.assert_called_once_with(
+ 'Error updating PCI resources for node %(node)s.',
+ {'node': mock.sentinel.node}
+ )
+
@mock.patch.object(manager, 'LOG')
@mock.patch.object(manager.ComputeManager,
'_update_available_resource_for_node')
@@ -1310,6 +1350,36 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(instance)
+ def test_init_instance_vif_plug_fails_missing_pci(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid=uuids.instance,
+ info_cache=None,
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ host=self.compute.host,
+ expected_attrs=['info_cache'])
+
+ with test.nested(
+ mock.patch.object(context, 'get_admin_context',
+ return_value=self.context),
+ mock.patch.object(objects.Instance, 'get_network_info',
+ return_value=network_model.NetworkInfo()),
+ mock.patch.object(self.compute.driver, 'plug_vifs',
+ side_effect=exception.PciDeviceNotFoundById("pci-addr")),
+ mock.patch("nova.compute.manager.LOG.exception"),
+ ) as (get_admin_context, get_nw_info, plug_vifs, log_exception):
+ # as this does not raise, we are sure that the compute service
+ # continues initializing the rest of the instances
+ self.compute._init_instance(self.context, instance)
+ log_exception.assert_called_once_with(
+ "Virtual interface plugging failed for instance. Probably the "
+ "vnic_type of the bound port has been changed. Nova does not "
+ "support such change.",
+ instance=instance
+ )
+
def _test__validate_pinning_configuration(self, supports_pcpus=True):
instance_1 = fake_instance.fake_instance_obj(
self.context, uuid=uuids.instance_1)
@@ -5064,13 +5134,16 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
msg = mock_log.warning.call_args_list[0]
self.assertIn('appears to not be owned by this host', msg[0][0])
- def test_init_host_pci_passthrough_whitelist_validation_failure(self):
- # Tests that we fail init_host if there is a pci.passthrough_whitelist
+ def test_init_host_pci_device_spec_validation_failure(self):
+ # Tests that we fail init_host if there is a pci.device_spec
# configured incorrectly.
- self.flags(passthrough_whitelist=[
- # it's invalid to specify both in the same devspec
- jsonutils.dumps({'address': 'foo', 'devname': 'bar'})],
- group='pci')
+ self.flags(
+ device_spec=[
+ # it's invalid to specify both in the same devspec
+ jsonutils.dumps({'address': 'foo', 'devname': 'bar'})
+ ],
+ group='pci'
+ )
self.assertRaises(exception.PciDeviceInvalidDeviceName,
self.compute.init_host)
@@ -5262,7 +5335,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [])
+ recreate, False, False, None, scheduled_node, {}, None, [], False)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5373,7 +5446,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
None, recreate=True, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
- limits={}, request_spec=request_spec, accel_uuids=[])
+ limits={}, request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5412,7 +5486,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance, None, None, None, None, None, None,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
- request_spec=request_spec, accel_uuids=[])
+ request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5438,7 +5513,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [])
+ False, False, migration, None, {}, None, [], False)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5460,7 +5535,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [])
+ None, [], False)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5542,7 +5617,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate, on_shared_storage,
preserve_ephemeral, {}, {},
self.allocations,
- mock.sentinel.mapping, [])
+ mock.sentinel.mapping, [],
+ False)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5560,8 +5636,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
provider_mappings=mock.sentinel.mapping)
mock_get_nw_info.assert_called_once_with(self.context, instance)
- def test_rebuild_default_impl(self):
- def _detach(context, bdms):
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test_rebuild_default_impl(self, is_vol_backed, reimage_boot_vol):
+ fake_image_meta = mock.MagicMock(id='fake_id')
+
+ def _detach(context, bdms, detach_root_bdm=True):
# NOTE(rpodolyaka): check that instance has been powered off by
# the time we detach block devices, exact calls arguments will be
# checked below
@@ -5587,13 +5667,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute, '_power_off_instance',
return_value=None),
mock.patch.object(self.compute, '_get_accel_info',
- return_value=[])
+ return_value=[]),
+ mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ return_value=is_vol_backed),
+ mock.patch.object(self.compute, '_rebuild_volume_backed_instance'),
+ mock.patch.object(compute_utils, 'get_root_bdm')
) as(
mock_destroy,
mock_spawn,
mock_save,
mock_power_off,
- mock_accel_info
+ mock_accel_info,
+ mock_is_volume_backed,
+ mock_rebuild_vol_backed_inst,
+ mock_get_root,
):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = None
@@ -5603,9 +5690,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.device_metadata = None
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
+ fake_block_device_info = {
+ 'block_device_mapping': [
+ {'attachment_id': '341a8917-f74d-4473-8ee7-4ca05e5e0ab3',
+ 'volume_id': 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'target_discovered': False,
+ 'target_portal': '127.0.0.1:3260',
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-'
+ 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'target_lun': 0}}}]}
self.compute._rebuild_default_impl(self.context,
instance,
- None,
+ fake_image_meta,
[],
admin_password='new_pass',
bdms=[],
@@ -5614,16 +5711,151 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
attach_block_devices=_attach,
network_info=None,
evacuate=False,
- block_device_info=None,
- preserve_ephemeral=False)
+ block_device_info=
+ fake_block_device_info,
+ preserve_ephemeral=False,
+ reimage_boot_volume=
+ reimage_boot_vol)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
- network_info=None, block_device_info=None)
+ network_info=None, block_device_info=fake_block_device_info)
mock_power_off.assert_called_once_with(
instance, clean_shutdown=True)
+ if is_vol_backed and reimage_boot_vol:
+ mock_rebuild_vol_backed_inst.assert_called_once_with(
+ self.context, instance, [], fake_image_meta.id)
+ else:
+ mock_rebuild_vol_backed_inst.assert_not_called()
+
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+ events = [('volume-reimaged', root_bdm.volume_id)]
+ image_size_gb = 1
+ deadline = CONF.reimage_timeout_per_gb * image_size_gb
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as (
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ # 1024 ** 3 = 1073741824
+ mock_get_img.return_value = {'size': 1073741824}
+ self.compute._rebuild_volume_backed_instance(
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_vol_api.reimage_volume.assert_called_once_with(
+ self.context, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+ mock_get_root_bdm.assert_called_once_with(
+ self.context, instance, bdms)
+ wait_inst_event.assert_called_once_with(
+ instance, events, deadline=deadline,
+ error_callback=self.compute._reimage_failed_callback)
+
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance_image_not_found(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as(
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ mock_get_img.side_effect = exception.ImageNotFound(
+ image_id=uuids.image_id)
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ mock_get_img.return_value = {'size': 1}
+ self.assertRaises(
+ exception.BuildAbortException,
+ self.compute._rebuild_volume_backed_instance,
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+
+ @mock.patch.object(objects.Instance, 'save', return_value=None)
+ @mock.patch.object(fake_driver.SmallFakeDriver, 'detach_volume')
+ @mock.patch.object(cinder.API, 'roll_detaching')
+ def test__detach_root_volume(self, mock_roll_detach, mock_detach,
+ mock_save):
+ exception_list = [
+ '',
+ exception.DiskNotFound(location="not\\here"),
+ exception.DeviceDetachFailed(device="fake_dev", reason="unknown"),
+ ]
+ mock_detach.side_effect = exception_list
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.assertRaises(exception.DeviceDetachFailed,
+ self.compute._detach_root_volume,
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
+ self.assertRaises(Exception, self.compute._detach_root_volume, # noqa
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
def test_do_rebuild_instance_check_trusted_certs(self):
"""Tests the scenario that we're rebuilding an instance with
@@ -5645,7 +5877,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -7695,6 +7927,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
@@ -8589,11 +8857,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_test(get_by_instance_uuid,
- migration_save,
notify_usage_exists,
migrate_instance_start,
setup_networks_on_host,
@@ -8665,7 +8931,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_finish',
side_effect=_migrate_instance_finish)
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.instance, 'save')
@mock.patch.object(self.compute, '_set_instance_info')
@mock.patch.object(db, 'instance_fault_create')
@@ -8679,7 +8944,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
fault_create,
set_instance_info,
instance_save,
- migration_save,
setup_networks_on_host,
migrate_instance_finish,
get_instance_nw_info,
@@ -8723,11 +8987,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_revert_resize(mock_get_by_instance_uuid,
- mock_migration_save,
mock_extra_update,
mock_notify_usage_exists,
mock_migrate_instance_start,
@@ -8774,7 +9036,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(self.compute, "_set_instance_info")
@mock.patch.object(self.instance, 'save')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(db, 'instance_fault_create')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@@ -8798,7 +9059,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock_extra_update,
mock_fault_create,
mock_fault_from_exc,
- mock_mig_save,
mock_inst_save,
mock_set,
mock_notify_about_instance_action,
@@ -8892,7 +9152,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute, '_delete_scheduler_instance_info')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.Migration.get_by_id')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@@ -8901,7 +9160,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.instance, 'save')
def do_confirm_resize(mock_save, mock_drop, mock_delete,
mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save, mock_mig_get, mock_inst_get,
+ mock_mig_get, mock_inst_get,
mock_delete_scheduler_info):
self._mock_rt()
@@ -8984,16 +9243,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
instance_get_by_uuid.assert_called_once()
def test_confirm_resize_calls_virt_driver_with_old_pci(self):
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@mock.patch.object(self.compute, '_delete_allocation_after_move')
@mock.patch.object(self.instance, 'drop_migration_context')
@mock.patch.object(self.instance, 'save')
- def do_confirm_resize(mock_save, mock_drop, mock_delete,
- mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save):
+ def do_confirm_resize(
+ mock_save, mock_drop, mock_delete, mock_confirm, mock_nwapi,
+ mock_notify
+ ):
# Mock virt driver confirm_resize() to save the provided
# network_info, we will check it later.
updated_nw_info = []
@@ -9986,6 +10245,27 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.instance,
migration)
+ def test_post_live_migration_update_host(self):
+ @mock.patch.object(self.compute, '_get_compute_info')
+ def _test_post_live_migration(_get_compute_info):
+ dest_host = 'dest'
+ cn = objects.ComputeNode(hypervisor_hostname=dest_host)
+ _get_compute_info.return_value = cn
+ instance = fake_instance.fake_instance_obj(self.context,
+ node='src',
+ uuid=uuids.instance)
+ with mock.patch.object(self.compute, "_post_live_migration"
+ ) as plm, mock.patch.object(instance, "save") as save:
+ error = ValueError("some failure")
+ plm.side_effect = error
+ self.assertRaises(
+ ValueError, self.compute._post_live_migration_update_host,
+ self.context, instance, dest_host)
+ save.assert_called_once()
+ self.assertEqual(instance.host, dest_host)
+
+ _test_post_live_migration()
+
def test_post_live_migration_cinder_pre_344_api(self):
# Because live migration has
# succeeded,_post_live_migration_remove_source_vol_connections()
diff --git a/nova/tests/unit/compute/test_host_api.py b/nova/tests/unit/compute/test_host_api.py
index e4c310deb0..7f9e862057 100644
--- a/nova/tests/unit/compute/test_host_api.py
+++ b/nova/tests/unit/compute/test_host_api.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import oslo_messaging as messaging
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/compute/test_instance_list.py b/nova/tests/unit/compute/test_instance_list.py
index e6e195e9cc..6544ddc801 100644
--- a/nova/tests/unit/compute/test_instance_list.py
+++ b/nova/tests/unit/compute/test_instance_list.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
index 0ea20a60ef..8822cb4522 100644
--- a/nova/tests/unit/compute/test_keypairs.py
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -14,7 +14,8 @@
# under the License.
"""Tests for keypair API."""
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_limit import fixture as limit_fixture
@@ -123,24 +124,6 @@ class CreateImportSharedTestMixIn(object):
name, *args)
self.assertIn(expected_message, str(exc))
- def assertInvalidKeypair(self, expected_message, name):
- msg = 'Keypair data is invalid: %s' % expected_message
- self.assertKeypairRaises(exception.InvalidKeypair, msg, name)
-
- def test_name_too_short(self):
- msg = ('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, '')
-
- def test_name_too_long(self):
- msg = ('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, 'x' * 256)
-
- def test_invalid_chars(self):
- msg = "Keypair name contains unsafe characters"
- self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
-
def test_already_exists(self):
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
diff --git a/nova/tests/unit/compute/test_multi_cell_list.py b/nova/tests/unit/compute/test_multi_cell_list.py
index 6bb67a76b8..5906f69de2 100644
--- a/nova/tests/unit/compute/test_multi_cell_list.py
+++ b/nova/tests/unit/compute/test_multi_cell_list.py
@@ -13,7 +13,8 @@
from contextlib import contextmanager
import copy
import datetime
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import multi_cell_list
diff --git a/nova/tests/unit/compute/test_pci_placement_translator.py b/nova/tests/unit/compute/test_pci_placement_translator.py
new file mode 100644
index 0000000000..0592186e54
--- /dev/null
+++ b/nova/tests/unit/compute/test_pci_placement_translator.py
@@ -0,0 +1,291 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import ddt
+from oslo_utils.fixture import uuidsentinel as uuids
+from unittest import mock
+
+from nova.compute import pci_placement_translator as ppt
+from nova.compute import provider_tree
+from nova import exception
+from nova.objects import fields
+from nova.objects import pci_device
+from nova.pci import devspec
+from nova import test
+
+
+def dev(v, p):
+ return pci_device.PciDevice(vendor_id=v, product_id=p)
+
+
+# NOTE(gibi): Most of the nova.compute.pci_placement_translator module is
+# covered with functional tests in
+# nova.tests.functional.libvirt.test_pci_in_placement
+@ddt.ddt
+class TestTranslator(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ patcher = mock.patch(
+ "nova.compute.pci_placement_translator."
+ "_is_placement_tracking_enabled")
+ self.addCleanup(patcher.stop)
+ patcher.start()
+
+ def test_translator_skips_devices_without_matching_spec(self):
+ """As every PCI device in the PciTracker is created by matching a
+ PciDeviceSpec the translator should always be able to look up the spec
+ for a device. But if it cannot, then the device will be skipped and a
+ warning will be emitted.
+ """
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = pci_device.PciDeviceList(
+ objects=[
+ pci_device.PciDevice(
+ address="0000:81:00.0",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ instance_uuid=None,
+ )
+ ]
+ )
+ # So we have a device but there is no spec for it
+ pci_tracker.dev_filter.get_devspec = mock.Mock(return_value=None)
+ pci_tracker.dev_filter.specs = []
+ # We expect that the provider_tree is not touched as the device without
+ # a spec is skipped; we assert that with the NonCallableMock.
+ provider_tree = mock.NonCallableMock()
+
+ ppt.update_provider_tree_for_pci(
+ provider_tree, "fake-node", pci_tracker, {}, [])
+
+ self.assertIn(
+ "Device spec is not found for device 0000:81:00.0 in "
+ "[pci]device_spec. Ignoring device in Placement resource view. "
+ "This should not happen. Please file a bug.",
+ self.stdlog.logger.output
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (None, set()),
+ ("", set()),
+ ("a", {"CUSTOM_A"}),
+ ("a,b", {"CUSTOM_A", "CUSTOM_B"}),
+ ("HW_GPU_API_VULKAN", {"HW_GPU_API_VULKAN"}),
+ ("CUSTOM_FOO", {"CUSTOM_FOO"}),
+ ("custom_bar", {"CUSTOM_BAR"}),
+ ("custom-bar", {"CUSTOM_CUSTOM_BAR"}),
+ ("CUSTOM_a", {"CUSTOM_A"}),
+ ("a@!#$b123X", {"CUSTOM_A_B123X"}),
+ # Note that both trait names are normalized to the same trait
+ ("a!@b,a###b", {"CUSTOM_A_B"}),
+ )
+ def test_trait_normalization(self, trait_names, expected_traits):
+ self.assertEqual(
+ expected_traits,
+ ppt.get_traits(trait_names)
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (dev(v='1234', p='5678'), None, "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "", "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "PGPU", "PGPU"),
+ (dev(v='1234', p='5678'), "pgpu", "PGPU"),
+ (dev(v='1234', p='5678'), "foobar", "CUSTOM_FOOBAR"),
+ (dev(v='1234', p='5678'), "custom_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom-foo", "CUSTOM_CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "a###b", "CUSTOM_A_B"),
+ (dev(v='123a', p='567b'), "", "CUSTOM_PCI_123A_567B"),
+ )
+ def test_resource_class_normalization(self, pci_dev, rc_name, expected_rc):
+ self.assertEqual(
+ expected_rc,
+ ppt.get_resource_class(
+ rc_name, pci_dev.vendor_id, pci_dev.product_id
+ ),
+ )
+
+ def test_dependent_device_pf_then_vf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(pf, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ vf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_dependent_device_vf_then_pf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf2 = pci_device.PciDevice(
+ address="0000:81:00.2",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(vf, {"resource_class": "foo"})
+ pv._add_dev(vf2, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ pf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.0 and 0000:81:00.1,0000:81:00.2 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_mixed_rc_for_sibling_vfs(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf1, vf2, vf3, vf4 = [
+ pci_device.PciDevice(
+ address="0000:81:00.%d" % f,
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ for f in range(0, 4)
+ ]
+
+ pv._add_dev(vf1, {"resource_class": "a", "traits": "foo,bar,baz"})
+ # order is irrelevant
+ pv._add_dev(vf2, {"resource_class": "a", "traits": "foo,baz,bar"})
+ # but missing trait is rejected
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf3,
+ {"resource_class": "a", "traits": "foo,bar"},
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_FOO for "
+ "0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO "
+ "for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+ # as well as additional trait
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf4,
+ {"resource_class": "a", "traits": "foo,bar,baz,extra"}
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_EXTRA,"
+ "CUSTOM_FOO for 0000:81:00.3 and COMPUTE_MANAGED_PCI_DEVICE,"
+ "CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+
+ def test_translator_maps_pci_device_to_rp(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+
+ pv._add_dev(vf, {})
+ pv._add_dev(pf, {})
+ pv.update_provider_tree(pt)
+
+ self.assertEqual(
+ pt.data("fake-node_0000:71:00.0").uuid, vf.extra_info["rp_uuid"]
+ )
+ self.assertEqual(
+ pt.data("fake-node_0000:72:00.0").uuid, pf.extra_info["rp_uuid"]
+ )
+
+ def test_update_provider_tree_for_pci_update_pools(self):
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ )
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = [pf]
+ pci_tracker.dev_filter.specs = [devspec.PciDeviceSpec({})]
+
+ ppt.update_provider_tree_for_pci(pt, 'fake-node', pci_tracker, {}, [])
+
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices.\
+ assert_called_once_with()
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index 6a87bee436..6258054aa7 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -12,12 +12,14 @@
import copy
import datetime
+import ddt
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
import os_resource_classes as orc
import os_traits
from oslo_config import cfg
+from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import units
@@ -1511,6 +1513,7 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.old_resources)
+@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@@ -1577,6 +1580,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
+ @mock.patch('nova.objects.ComputeNode.save', new=mock.Mock())
@mock.patch(
'nova.pci.stats.PciDeviceStats.has_remote_managed_device_pools',
return_value=True)
@@ -1707,12 +1711,18 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(exp_inv, ptree.data(new_compute.uuid).inventory)
mock_sync_disabled.assert_called_once()
+ @ddt.data(
+ exc.ResourceProviderUpdateConflict(
+ uuid='uuid', generation=42, error='error'),
+ exc.PlacementReshapeConflict(error='error'),
+ )
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_resource_change', return_value=False)
- def test_update_retry_success(self, mock_resource_change,
- mock_sync_disabled):
+ def test_update_retry_success(
+ self, exc, mock_resource_change, mock_sync_disabled
+ ):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
@@ -1726,9 +1736,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.driver_mock.update_provider_tree.side_effect = lambda *a: None
ufpt_mock = self.rt.reportclient.update_from_provider_tree
- ufpt_mock.side_effect = (
- exc.ResourceProviderUpdateConflict(
- uuid='uuid', generation=42, error='error'), None)
+ ufpt_mock.side_effect = (exc, None)
self.rt._update(mock.sentinel.ctx, new_compute)
@@ -1766,7 +1774,221 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
- self.assertEqual(1, mock_resource_change.call_count)
+ self.assertEqual(0, mock_resource_change.call_count)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting(self, mock_update_provider_tree_for_pci):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call did not change any allocations so
+ update_from_provider_tree called without triggering reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_reshape(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call changed allocations so
+ update_from_provider_tree called with allocations to trigger reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting changed some allocations
+ mock_update_provider_tree_for_pci.return_value = True
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @ddt.data(True, False)
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_driver_reshape(
+ self, pci_reshape, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker first called the
+ driver.update_provider_tree and that needed reshape so the allocations
+ are pulled. Then independently of update_provider_tree_for_pci the
+ update_from_provider_tree is called with the allocations to trigger
+ reshape in placement
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that the driver requests reshape
+ self.driver_mock.update_provider_tree.side_effect = [
+ exc.ReshapeNeeded, None]
+ mock_update_provider_tree_for_pci.return_value = pci_reshape
+
+ self.rt._update(mock.sentinel.ctx, compute_obj, startup=True)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_same_host_resize(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and with the list of instances that are being resized to the same
+ host.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+ self.rt.tracked_migrations = {
+ uuids.inst1: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst1,
+ ),
+ uuids.inst2: objects.Migration(
+ migration_type="evacuation",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst2,
+ ),
+ uuids.inst3: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node1",
+ dest_node="fake-node2",
+ instance_uuid=uuids.inst3,
+ ),
+ }
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [uuids.inst1],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ def test_update_pci_reporting_allocation_in_use_error_propagated(self):
+ """Assert that if the pci placement reporting code tries to remove
+ inventory with allocation from placement due to invalid hypervisor
+ or [pci]device_spec reconfiguration then the InventoryInUse error from
+ placement is propagated and makes the compute startup fail.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ self.rt.reportclient.update_from_provider_tree.side_effect = (
+ exc.InventoryInUse(
+ resource_class="FOO", resource_provider="bar"))
+
+ self.assertRaises(
+ exc.PlacementPciException,
+ self.rt._update,
+ mock.sentinel.ctx,
+ compute_obj,
+ startup=True,
+ )
@mock.patch('nova.objects.Service.get_by_compute_host',
return_value=objects.Service(disabled=True))
@@ -1820,6 +2042,10 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_update_to_placement',
+ new=mock.Mock())
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
@@ -2124,26 +2350,45 @@ class TestInstanceClaim(BaseTestCase):
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
+ self.flags(
+ group="pci",
+ device_spec=[
+ jsonutils.dumps({"vendor_id": "0001", "product_id": "0002"})
+ ],
+ )
+ pci_dev = pci_device.PciDevice.create(
+ None,
+ dev_dict={
+ "compute_node_id": 1,
+ "address": "0000:81:00.0",
+ "product_id": "0002",
+ "vendor_id": "0001",
+ "numa_node": 0,
+ "dev_type": obj_fields.PciDeviceType.STANDARD,
+ "status": obj_fields.PciDeviceStatus.AVAILABLE,
+ "parent_addr": None,
+ },
+ )
+
+ pci_dev.instance_uuid = None
+ pci_devs = [pci_dev]
+
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
with mock.patch.object(
objects.PciDeviceList, 'get_by_compute_node',
- return_value=objects.PciDeviceList()
+ return_value=objects.PciDeviceList(objects=pci_devs)
):
self.rt.pci_tracker = pci_manager.PciDevTracker(
mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
- pci_dev = pci_device.PciDevice.create(
- None, fake_pci_device.dev_dict)
- pci_devs = [pci_dev]
- self.rt.pci_tracker.pci_devs = objects.PciDeviceList(objects=pci_devs)
-
request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ spec=[{'vendor_id': '0001', 'product_id': '0002'}])
pci_requests = objects.InstancePCIRequests(
requests=[request],
instance_uuid=self.instance.uuid)
self.instance.pci_requests = pci_requests
+ self.instance.pci_devices = objects.PciDeviceList()
check_bfv_mock.return_value = False
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@@ -2155,7 +2400,20 @@ class TestInstanceClaim(BaseTestCase):
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
- 'pci_device_pools': objects.PciDevicePoolList(),
+ 'pci_device_pools': objects.PciDevicePoolList(
+ objects=[
+ objects.PciDevicePool(
+ vendor_id='0001',
+ product_id='0002',
+ numa_node=0,
+ tags={
+ 'dev_type': 'type-PCI',
+ 'address': '0000:81:00.0'
+ },
+ count=0
+ )
+ ]
+ ),
'stats': {
'io_workload': 0,
'num_instances': 1,
@@ -4205,9 +4463,9 @@ class TestCleanComputeNodeCache(BaseTestCase):
invalid_nodename = "invalid-node"
self.rt.compute_nodes[_NODENAME] = self.compute
self.rt.compute_nodes[invalid_nodename] = mock.sentinel.compute
- with mock.patch.object(
- self.rt.reportclient, "invalidate_resource_provider",
- ) as mock_invalidate:
- self.rt.clean_compute_node_cache([self.compute])
- mock_remove.assert_called_once_with(invalid_nodename)
- mock_invalidate.assert_called_once_with(invalid_nodename)
+ mock_invalidate = self.rt.reportclient.invalidate_resource_provider
+
+ self.rt.clean_compute_node_cache([self.compute])
+
+ mock_remove.assert_called_once_with(invalid_nodename)
+ mock_invalidate.assert_called_once_with(invalid_nodename)
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index f0e560a3de..55d0fc53e8 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -16,7 +16,8 @@
Unit Tests for nova.compute.rpcapi
"""
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -834,7 +835,8 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
- limits=None, request_spec=None, accel_uuids=[], version='6.0')
+ limits=None, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, version='6.1')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -861,20 +863,58 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'migration': None,
'limits': None
}
+ # Pass reimage_boot_volume to the client call...
compute_api.rebuild_instance(
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
- node=None, host=None, **rebuild_args)
+ node=None, host=None, reimage_boot_volume=False,
+ **rebuild_args)
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
+ # ...and assert that it does not show up on the wire before 6.1
mock_cctx.cast.assert_called_with( # No accel_uuids
ctxt, 'rebuild_instance',
instance=self.fake_instance_obj,
scheduled_node=None, **rebuild_args)
+ def test_rebuild_instance_vol_backed_old_rpcapi(self):
+ # With rpcapi < 6.1, if reimage_boot_volume is True then we
+ # should raise an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to [False, True, True], so that 6.0
+ # version is used.
+ mock_client.can_send_version.side_effect = [False, True, True]
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ }
+ self.assertRaises(
+ exception.NovaException, compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+ mock_client.can_send_version.assert_has_calls([mock.call('6.1')])
+
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
self._test_compute_api('reserve_block_device_name', 'call',
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index a50b4ca4de..c939b927f1 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import eventlet
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -24,6 +25,7 @@ from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
+from nova import context
from nova.db.main import api as db
from nova import exception
from nova.network import neutron as neutron_api
@@ -849,9 +851,67 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
exclude_states = set()
return vm_state - exclude_states
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'aggregate_add_host')
+ @mock.patch('nova.availability_zones.get_availability_zones')
+ def _create_host_inside_az(
+ self,
+ ctxt,
+ host,
+ az,
+ mock_az,
+ mock_aggregate,
+ ):
+
+ self.api = compute_api.AggregateAPI()
+ mock_az.return_value = [az]
+
+ cells = objects.CellMappingList.get_all(ctxt)
+ cell = cells[0]
+ with context.target_cell(ctxt, cell) as cctxt:
+ s = objects.Service(context=cctxt,
+ host=host,
+ binary='nova-compute',
+ topic='compute',
+ report_count=0)
+ s.create()
+
+ hm = objects.HostMapping(context=ctxt,
+ cell_mapping=cell,
+ host=host)
+ hm.create()
+
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ az, host)
+
+ def _create_request_spec_for_initial_az(self, az):
+ fake_spec = objects.RequestSpec()
+ fake_spec.availability_zone = az
+ return fake_spec
+
+ def _assert_unshelving_and_request_spec_az_and_host(
+ self,
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ ):
+ mock_get_by_instance_uuid.assert_called_once_with(context,
+ instance.uuid)
+
+ mock_unshelve.assert_called_once_with(context, instance, fake_spec)
+
+ self.assertEqual(instance.task_state, task_states.UNSHELVING)
+ self.assertEqual(fake_spec.availability_zone, fake_zone)
+ if fake_host:
+ self.assertEqual(fake_spec.requested_destination.host, fake_host)
+
def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False,
clean_shutdown=True):
- # Ensure instance can be shelved.
+
params = dict(task_state=None, vm_state=vm_state, display_name='vm01')
fake_instance = self._create_fake_instance_obj(params=params)
instance = fake_instance
@@ -988,12 +1048,14 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
return instance
+ @mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
- def test_unshelve(self, get_by_instance_uuid):
+ def test_unshelve(self, get_by_instance_uuid, fake_save):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(vm_states.SHELVED)
fake_spec = objects.RequestSpec()
+ fake_spec.availability_zone = None
get_by_instance_uuid.return_value = fake_spec
with mock.patch.object(self.compute_api.compute_task_api,
'unshelve_instance') as unshelve:
@@ -1116,24 +1178,558 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
mock_get.assert_called_once_with(self.context, uuids.volume_id)
- @mock.patch.object(compute_api.API, '_validate_unshelve_az')
+# Next tests attempt to check the following behavior
+# +----------+---------------------------+-------+----------------------------+
+# | Boot | Unshelve after offload AZ | Host | Result |
+# +==========+===========================+=======+============================+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+#
+# (1) Check at the api and return an error.
+#
+#
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
@mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
- def test_specified_az_unshelve(self, get_by_instance_uuid,
- mock_save, mock_validate_unshelve_az):
- # Ensure instance can be unshelved.
+ def test_unshelve_without_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
instance = self._get_specify_state_instance(
vm_states.SHELVED_OFFLOADED)
- new_az = "west_az"
- fake_spec = objects.RequestSpec()
- fake_spec.availability_zone = "fake-old-az"
- get_by_instance_uuid.return_value = fake_spec
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, host=fake_host)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, new_az=fake_zone)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz_and_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz_and_host_invalid(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ new_az='avail_zone1',
+ host='fake_mini'
+ )
+
+        self.assertIn(
+            'Host "fake_mini" is not in the availability zone "avail_zone1".',
+            exc.message
+        )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
- self.compute_api.unshelve(self.context, instance, new_az=new_az)
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_unpin_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, new_az=None)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_host_in_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, host=fake_host)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_invalid_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ host='fake_mini'
+ )
+
+        self.assertIn(
+            'Host "fake_mini" is not in the availability zone "avail_zone1".',
+            exc.message
+        )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_host_unpin_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=None, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
- mock_save.assert_called_once_with()
- self.assertEqual(new_az, fake_spec.availability_zone)
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz_and_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
- mock_validate_unshelve_az.assert_called_once_with(
- self.context, instance, new_az)
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz_and_invalid_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ new_az=fake_zone,
+ host='fake_mini'
+ )
+
+        self.assertIn(
+            'Host "fake_mini" is not in the availability zone "avail_zone1".',
+            exc.message
+        )
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index 6c3cbc1b57..848050d769 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -19,8 +19,8 @@
import copy
import datetime
import string
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
index d6dc8f125b..71c9097525 100644
--- a/nova/tests/unit/compute/test_virtapi.py
+++ b/nova/tests/unit/compute/test_virtapi.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
import eventlet.timeout
-import mock
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
@@ -187,6 +187,9 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
do_test()
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_timeout(self):
instance = mock.Mock()
instance.vm_state = mock.sentinel.vm_state
@@ -212,11 +215,14 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
'vm_state': mock.sentinel.vm_state,
'task_state': mock.sentinel.task_state,
'event_states':
- 'foo-bar: timed out after 0.00 seconds',
+ 'foo-bar: timed out after 1.23 seconds',
},
instance=instance
)
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_one_received_one_timed_out(self):
instance = mock.Mock()
instance.vm_state = mock.sentinel.vm_state
@@ -252,12 +258,15 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
'vm_state': mock.sentinel.vm_state,
'task_state': mock.sentinel.task_state,
'event_states':
- 'foo-bar: received after waiting 0.00 seconds, '
- 'missing-event: timed out after 0.00 seconds',
+ 'foo-bar: received after waiting 1.23 seconds, '
+ 'missing-event: timed out after 1.23 seconds',
},
instance=instance
)
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_multiple_events(self):
instance = mock.Mock()
instance.vm_state = mock.sentinel.vm_state
@@ -282,7 +291,6 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
m.tag = tag
m.event_name = '%s-%s' % (name, tag)
m.wait.side_effect = fake_event_waiter
- print(name, tag)
if name == 'received-but-not-waited':
m.ready.return_value = True
if name == 'missing-but-not-waited':
@@ -323,9 +331,9 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
'vm_state': mock.sentinel.vm_state,
'task_state': mock.sentinel.task_state,
'event_states':
- 'received-event: received after waiting 0.00 seconds, '
+ 'received-event: received after waiting 1.23 seconds, '
'early-event: received early, '
- 'missing-event: timed out after 0.00 seconds, '
+ 'missing-event: timed out after 1.23 seconds, '
'received-but-not-waited-event: received but not '
'processed, '
'missing-but-not-waited-event: expected but not received'
diff --git a/nova/tests/unit/conductor/tasks/test_base.py b/nova/tests/unit/conductor/tasks/test_base.py
index a7151c4cd0..cf9e8f9cfd 100644
--- a/nova/tests/unit/conductor/tasks/test_base.py
+++ b/nova/tests/unit/conductor/tasks/test_base.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.conductor.tasks import base
from nova import test
diff --git a/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py b/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
index ec07e6f55f..c4b6c217b6 100644
--- a/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_messaging import exceptions as messaging_exceptions
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index cb40c076c8..de15be28bd 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -345,6 +346,36 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
mock.call(self.destination)],
mock_get_info.call_args_list)
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_raise_ex(self, mock_get_info):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=False)
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.task._check_compatible_with_source_hypervisor,
+ self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_do_not_raise_ex(
+ self, mock_get_info
+ ):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=True)
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.task._check_compatible_with_source_hypervisor(self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check):
diff --git a/nova/tests/unit/conductor/tasks/test_migrate.py b/nova/tests/unit/conductor/tasks/test_migrate.py
index 145e54f884..46cb033c5c 100644
--- a/nova/tests/unit/conductor/tasks/test_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_migrate.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 2e3725223e..e942217a6c 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -16,8 +16,10 @@
"""Tests for the conductor service."""
import copy
+import ddt
+from unittest import mock
-import mock
+from keystoneauth1 import exceptions as ks_exc
from oslo_db import exception as db_exc
from oslo_limit import exception as limit_exceptions
import oslo_messaging as messaging
@@ -52,6 +54,7 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import request_spec
from nova.scheduler.client import query
+from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
@@ -385,7 +388,8 @@ class _BaseTaskTestCase(object):
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host',
- 'request_spec': None}
+ 'request_spec': None,
+ 'reimage_boot_volume': False}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -4747,6 +4751,42 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_rebuild_instance_volume_backed(self):
+ inst_obj = self._create_fake_instance_obj()
+ version = '1.24'
+ cctxt_mock = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+
+ @mock.patch.object(self.conductor.client, 'prepare',
+ return_value=cctxt_mock)
+ @mock.patch.object(self.conductor.client, 'can_send_version',
+ return_value=True)
+ def _test(mock_can_send_ver, prepare_mock):
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ prepare_mock.assert_called_once_with(version=version)
+ kw = {'instance': inst_obj, **rebuild_args}
+ cctxt_mock.cast.assert_called_once_with(
+ self.context, 'rebuild_instance', **kw)
+ _test()
+
+ def test_rebuild_instance_volume_backed_old_service(self):
+ """Tests rebuild_instance_volume_backed when the service is too old"""
+ inst_obj = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.assertRaises(exc.NovaException,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj,
+ **rebuild_args)
+ can_send_version.assert_called_once_with('1.24')
+
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
@@ -4869,3 +4909,35 @@ class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
logtext)
self.assertIn('host3\' because it is not up', logtext)
self.assertIn('image1 failed 1 times', logtext)
+
+
+@ddt.ddt
+class TestConductorTaskManager(test.NoDBTestCase):
+ def test_placement_client_startup(self):
+ self.assertIsNone(report.PLACEMENTCLIENT)
+ conductor_manager.ComputeTaskManager()
+ self.assertIsNotNone(report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ test.TestingException)
+ def test_placement_client_startup_fatals(self, exc):
+ self.assertRaises(exc,
+ self._test_placement_client_startup_exception, exc)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure)
+ def test_placement_client_startup_non_fatal(self, exc):
+ self._test_placement_client_startup_exception(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_placement_client_startup_exception(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ try:
+ conductor_manager.ComputeTaskManager()
+ finally:
+ mock_log.error.assert_called_once()
diff --git a/nova/tests/unit/console/rfb/test_auth.py b/nova/tests/unit/console/rfb/test_auth.py
index c4026b6637..1d66b2684f 100644
--- a/nova/tests/unit/console/rfb/test_auth.py
+++ b/nova/tests/unit/console/rfb/test_auth.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/rfb/test_authnone.py b/nova/tests/unit/console/rfb/test_authnone.py
index e628106e3b..3ca44dce89 100644
--- a/nova/tests/unit/console/rfb/test_authnone.py
+++ b/nova/tests/unit/console/rfb/test_authnone.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/rfb/test_authvencrypt.py b/nova/tests/unit/console/rfb/test_authvencrypt.py
index f7fc31939e..de9bccb44a 100644
--- a/nova/tests/unit/console/rfb/test_authvencrypt.py
+++ b/nova/tests/unit/console/rfb/test_authvencrypt.py
@@ -14,8 +14,8 @@
import ssl
import struct
+from unittest import mock
-import mock
from nova.console.rfb import auth
from nova.console.rfb import authvencrypt
diff --git a/nova/tests/unit/console/securityproxy/test_rfb.py b/nova/tests/unit/console/securityproxy/test_rfb.py
index 3eb8ba6acf..17cf8f7c57 100644
--- a/nova/tests/unit/console/securityproxy/test_rfb.py
+++ b/nova/tests/unit/console/securityproxy/test_rfb.py
@@ -15,7 +15,7 @@
"""Tests the Console Security Proxy Framework."""
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/test_serial.py b/nova/tests/unit/console/test_serial.py
index bc87ca6ca2..44d88e6e83 100644
--- a/nova/tests/unit/console/test_serial.py
+++ b/nova/tests/unit/console/test_serial.py
@@ -15,8 +15,7 @@
"""Tests for Serial Console."""
import socket
-
-import mock
+from unittest import mock
from nova.console import serial
from nova import exception
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index 1833d3a0c9..fc25bef2bc 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -17,8 +17,8 @@
import copy
import io
import socket
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
@@ -587,12 +587,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
@@ -617,41 +617,32 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+ # which will cause a 301 Moved Permanently error to be returned
+ # instead that redirects to a sanitized version of the URL with extra
+ # leading '/' characters removed.
+ # See https://github.com/python/cpython/issues/87389 for details.
+ # We will consider either response to be valid for this test. This will
+ # also help if and when the above fix gets backported to older versions
+ # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ location = location.removeprefix('Location: ').rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data by calling the
- # request socket sendall() method.
- self.data = b''
-
- def fake_sendall(data):
- self.data += data
-
- mock_req.sendall.side_effect = fake_sendall
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.data = self.data.decode()
- self.assertIn('Error code: 400', self.data)
- self.assertIn('Message: URI must not start with //', self.data)
+ self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
diff --git a/nova/tests/unit/db/api/test_api.py b/nova/tests/unit/db/api/test_api.py
index 251407612f..6113791a8e 100644
--- a/nova/tests/unit/db/api/test_api.py
+++ b/nova/tests/unit/db/api/test_api.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.db.api import api as db_api
from nova import test
diff --git a/nova/tests/unit/db/api/test_migrations.py b/nova/tests/unit/db/api/test_migrations.py
index 1b14d569db..3b9b17aab2 100644
--- a/nova/tests/unit/db/api/test_migrations.py
+++ b/nova/tests/unit/db/api/test_migrations.py
@@ -21,10 +21,11 @@ test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
+from unittest import mock
+
from alembic import command as alembic_api
from alembic import script as alembic_script
from migrate.versioning import api as migrate_api
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
diff --git a/nova/tests/unit/db/main/test_api.py b/nova/tests/unit/db/main/test_api.py
index a984b7234d..98f9c854d9 100644
--- a/nova/tests/unit/db/main/test_api.py
+++ b/nova/tests/unit/db/main/test_api.py
@@ -18,10 +18,10 @@
import copy
import datetime
+from unittest import mock
from dateutil import parser as dateutil_parser
import iso8601
-import mock
import netaddr
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
@@ -277,33 +277,21 @@ class DecoratorTestCase(test.TestCase):
'No DB access allowed in ',
mock_log.error.call_args[0][0])
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_writer_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_writer_disable_db_access(self):
@db.pick_context_manager_writer
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_disable_db_access(self):
@db.pick_context_manager_reader
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_allow_async_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_allow_async_disable_db_access(self):
@db.pick_context_manager_reader_allow_async
def func(context, value):
pass
diff --git a/nova/tests/unit/db/main/test_migrations.py b/nova/tests/unit/db/main/test_migrations.py
index f5ce3697b3..d2c4ef9762 100644
--- a/nova/tests/unit/db/main/test_migrations.py
+++ b/nova/tests/unit/db/main/test_migrations.py
@@ -25,11 +25,12 @@ test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
+from unittest import mock
+
from alembic import command as alembic_api
from alembic import script as alembic_script
import fixtures
from migrate.versioning import api as migrate_api
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -240,6 +241,12 @@ class NovaMigrationsWalk(
'Index %s on table %s should not exist' % (index, table_name),
)
+ def assertColumnExists(self, connection, table_name, column):
+ self.assertTrue(
+ oslodbutils.column_exists(connection, table_name, column),
+ 'Column %s on table %s should exist' % (column, table_name),
+ )
+
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
@@ -280,6 +287,33 @@ class NovaMigrationsWalk(
# no check for the MySQL-specific change
+ def _check_ccb0fa1a2252(self, connection):
+ for prefix in ('', 'shadow_'):
+ table_name = prefix + 'block_device_mapping'
+ table = oslodbutils.get_table(connection, table_name)
+
+ self.assertColumnExists(connection, table_name, 'encrypted')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_secret_uuid')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_format')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_options')
+
+ # Only check for the expected types if we're using sqlite because
+ # other databases' types may be different. For example, Boolean
+ # may be represented as an integer in MySQL
+ if connection.engine.name != 'sqlite':
+ return
+
+ self.assertIsInstance(table.c.encrypted.type, sa.types.Boolean)
+ self.assertIsInstance(
+ table.c.encryption_secret_uuid.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_format.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_options.type, sa.types.String)
+
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
diff --git a/nova/tests/unit/db/test_migration.py b/nova/tests/unit/db/test_migration.py
index ca9b5c0c4c..ca86f6347c 100644
--- a/nova/tests/unit/db/test_migration.py
+++ b/nova/tests/unit/db/test_migration.py
@@ -14,12 +14,12 @@
import glob
import os
+from unittest import mock
import urllib
from alembic.runtime import migration as alembic_migration
from migrate import exceptions as migrate_exceptions
from migrate.versioning import api as migrate_api
-import mock
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
diff --git a/nova/tests/unit/fixtures/test_libvirt.py b/nova/tests/unit/fixtures/test_libvirt.py
index eab9c54a13..448f8f6720 100644
--- a/nova/tests/unit/fixtures/test_libvirt.py
+++ b/nova/tests/unit/fixtures/test_libvirt.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from lxml import etree
-import mock
from oslo_utils import uuidutils
from nova.objects import fields as obj_fields
diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py
index 4f35f060e4..935a271d44 100644
--- a/nova/tests/unit/image/test_glance.py
+++ b/nova/tests/unit/image/test_glance.py
@@ -18,6 +18,7 @@ import copy
import datetime
import io
from io import StringIO
+from unittest import mock
import urllib.parse as urlparse
import cryptography
@@ -28,7 +29,6 @@ import glanceclient.exc
from glanceclient.v1 import images
from glanceclient.v2 import schemas
from keystoneauth1 import loading as ks_loading
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
diff --git a/nova/tests/unit/limit/test_local.py b/nova/tests/unit/limit/test_local.py
index 4dd321b757..8bf163d69f 100644
--- a/nova/tests/unit/limit/test_local.py
+++ b/nova/tests/unit/limit/test_local.py
@@ -13,8 +13,7 @@
# under the License.
import copy
-
-import mock
+from unittest import mock
from oslo_config import cfg
from oslo_limit import exception as limit_exceptions
diff --git a/nova/tests/unit/limit/test_placement.py b/nova/tests/unit/limit/test_placement.py
index cfb6ce36b4..3640890c74 100644
--- a/nova/tests/unit/limit/test_placement.py
+++ b/nova/tests/unit/limit/test_placement.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from oslo_config import cfg
from oslo_limit import exception as limit_exceptions
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index ad987dad90..eefa7b974f 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -16,11 +16,11 @@
import collections
import copy
+from unittest import mock
from keystoneauth1.fixture import V2Token
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
-import mock
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo_config import cfg
@@ -42,7 +42,6 @@ from nova import objects
from nova.objects import fields as obj_fields
from nova.objects import network_request as net_req_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
@@ -3383,6 +3382,155 @@ class TestAPI(TestAPIBase):
mocked_client.list_ports.assert_called_once_with(
tenant_id=uuids.fake, device_id=uuids.instance)
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_full_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_single_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ fake_nets = [
+ {
+ "id": "net-id",
+ "name": "foo",
+ "tenant_id": uuids.fake,
+ }
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
@mock.patch.object(neutronapi, 'get_client')
def test_get_subnets_from_port(self, mock_get_client):
mocked_client = mock.create_autospec(client.Client)
@@ -7738,11 +7886,11 @@ class TestAPIPortbinding(TestAPIBase):
'vf_num': 1,
}))
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': '1377',
@@ -7783,11 +7931,11 @@ class TestAPIPortbinding(TestAPIBase):
})
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_card_serial(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': 'a2d6',
@@ -7867,11 +8015,11 @@ class TestAPIPortbinding(TestAPIBase):
})
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_with_cap(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {
constants.BINDING_PROFILE: {
'capabilities': ['switchdev']}}}
@@ -7907,12 +8055,12 @@ class TestAPIPortbinding(TestAPIBase):
constants.BINDING_PROFILE])
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_pf(
self, mock_get_instance_pci_devs, mock_get_devspec
):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_dev = objects.PciDevice(
@@ -8041,11 +8189,11 @@ class TestAPIPortbinding(TestAPIBase):
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_fail(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_objs = [objects.PciDevice(vendor_id='1377',
@@ -8062,7 +8210,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value=[])
+ @mock.patch('nova.objects.Instance.get_pci_devices', return_value=[])
def test_populate_neutron_binding_profile_pci_dev_not_found(
self, mock_get_instance_pci_devs):
api = neutronapi.API()
@@ -8073,9 +8221,13 @@ class TestAPIPortbinding(TestAPIBase):
api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
mock_get_instance_pci_devs.assert_called_once_with(
- instance, pci_req_id)
+ request_id=pci_req_id)
@mock.patch.object(
+ pci_utils, 'is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch.object(
pci_utils, 'get_vf_num_by_pci_address',
new=mock.MagicMock(
side_effect=(lambda vf_a: {'0000:0a:00.1': 1}.get(vf_a)))
@@ -8085,18 +8237,26 @@ class TestAPIPortbinding(TestAPIBase):
new=mock.MagicMock(side_effect=(lambda vf_a: {
'0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_pci_parse_whitelist_called_once(
- self, mock_get_instance_pci_devs):
- white_list = [
- '{"address":"0000:0a:00.1","physical_network":"default"}']
- cfg.CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ self, mock_get_instance_pci_devs
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:0a:00.1",
+ "physical_network": "default",
+ }
+ )
+ ]
+ cfg.CONF.set_override(
+ 'device_spec', device_spec, 'pci')
# NOTE(takashin): neutronapi.API must be initialized
- # after the 'passthrough_whitelist' is set in this test case.
+ # after the 'device_spec' is set in this test case.
api = neutronapi.API()
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
pci_req_id = 'my_req_id'
port_req_body = {'port': {}}
pci_dev = {'vendor_id': '1377',
@@ -8106,7 +8266,7 @@ class TestAPIPortbinding(TestAPIBase):
'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
- whitelist = pci_whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ whitelist = pci_whitelist.Whitelist(CONF.pci.device_spec)
with mock.patch.object(pci_whitelist.Whitelist,
'_parse_white_list_from_config',
wraps=whitelist._parse_white_list_from_config
@@ -8132,7 +8292,7 @@ class TestAPIPortbinding(TestAPIBase):
vf.update_device(pci_dev)
return instance, pf, vf
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_pf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -8146,7 +8306,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 0, req)
self.assertEqual(expected_port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -8158,7 +8318,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf_fail(self,
mock_get_mac_by_pci_address,
@@ -8173,7 +8333,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch('nova.network.neutron.LOG.error')
def test_populate_pci_mac_address_no_device(self, mock_log_error,
mock_get_instance_pci_devs):
diff --git a/nova/tests/unit/network/test_security_group.py b/nova/tests/unit/network/test_security_group.py
index b0bde1d9a2..a76dd4bf3c 100644
--- a/nova/tests/unit/network/test_security_group.py
+++ b/nova/tests/unit/network/test_security_group.py
@@ -13,10 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-import mock
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.v2_0 import client
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
diff --git a/nova/tests/unit/notifications/objects/test_flavor.py b/nova/tests/unit/notifications/objects/test_flavor.py
index 41fc8a36c3..e3cb9ec4c3 100644
--- a/nova/tests/unit/notifications/objects/test_flavor.py
+++ b/nova/tests/unit/notifications/objects/test_flavor.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from nova import context
from nova.notifications.objects import flavor as flavor_notification
diff --git a/nova/tests/unit/notifications/objects/test_instance.py b/nova/tests/unit/notifications/objects/test_instance.py
index c2b7315587..8735e972dc 100644
--- a/nova/tests/unit/notifications/objects/test_instance.py
+++ b/nova/tests/unit/notifications/objects/test_instance.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/notifications/objects/test_notification.py b/nova/tests/unit/notifications/objects/test_notification.py
index 3d9eeece8e..de9e6f2762 100644
--- a/nova/tests/unit/notifications/objects/test_notification.py
+++ b/nova/tests/unit/notifications/objects/test_notification.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_versionedobjects import fixture
@@ -386,7 +386,7 @@ notification_object_data = {
# ImageMetaProps, so when you see a fail here for that reason, you must
# *also* bump the version of ImageMetaPropsPayload. See its docstring for
# more information.
- 'ImageMetaPropsPayload': '1.9-24a851511d98e652aebd3536e7e08330',
+ 'ImageMetaPropsPayload': '1.12-b9c64832d7772c1973e913bacbe0e8f9',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.8-4fa3da9cbf0761f1f700ae578f36dc2f',
'InstanceActionRebuildNotification':
diff --git a/nova/tests/unit/notifications/objects/test_service.py b/nova/tests/unit/notifications/objects/test_service.py
index 6f0f5c7f7a..297dcac56f 100644
--- a/nova/tests/unit/notifications/objects/test_service.py
+++ b/nova/tests/unit/notifications/objects/test_service.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_utils import timeutils
from nova import context
diff --git a/nova/tests/unit/notifications/test_base.py b/nova/tests/unit/notifications/test_base.py
index 3ee2e36ddc..c0468ec64d 100644
--- a/nova/tests/unit/notifications/test_base.py
+++ b/nova/tests/unit/notifications/test_base.py
@@ -13,9 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context as nova_context
diff --git a/nova/tests/unit/objects/test_aggregate.py b/nova/tests/unit/objects/test_aggregate.py
index bdb14f72ad..3f01c9613d 100644
--- a/nova/tests/unit/objects/test_aggregate.py
+++ b/nova/tests/unit/objects/test_aggregate.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_block_device.py b/nova/tests/unit/objects/test_block_device.py
index 80c9e9a1fa..85959a961a 100644
--- a/nova/tests/unit/objects/test_block_device.py
+++ b/nova/tests/unit/objects/test_block_device.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
@@ -250,6 +251,14 @@ class _TestBlockDeviceMappingObject(object):
destination_type='local')
self.assertFalse(bdm.is_volume)
+ def test_is_local(self):
+ self.assertTrue(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='local').is_local)
+ self.assertFalse(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='volume').is_local)
+
def test_obj_load_attr_not_instance(self):
"""Tests that lazy-loading something other than the instance field
results in an error.
@@ -275,6 +284,11 @@ class _TestBlockDeviceMappingObject(object):
mock_inst_get_by_uuid.assert_called_once_with(
self.context, bdm.instance_uuid)
+ def test_obj_load_attr_encrypted(self):
+ bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
+ del bdm.encrypted
+ self.assertEqual(bdm.fields['encrypted'].default, bdm.encrypted)
+
def test_obj_make_compatible_pre_1_17(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
diff --git a/nova/tests/unit/objects/test_build_request.py b/nova/tests/unit/objects/test_build_request.py
index 2b60888c5d..a55ab34008 100644
--- a/nova/tests/unit/objects/test_build_request.py
+++ b/nova/tests/unit/objects/test_build_request.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as o_vo_base
diff --git a/nova/tests/unit/objects/test_cell_mapping.py b/nova/tests/unit/objects/test_cell_mapping.py
index 3182269cc5..936793294b 100644
--- a/nova/tests/unit/objects/test_cell_mapping.py
+++ b/nova/tests/unit/objects/test_cell_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 297edfbd55..7e6894a1cc 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -666,8 +666,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -694,8 +694,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -722,8 +722,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
def test_get_all_by_not_mapped(self):
diff --git a/nova/tests/unit/objects/test_console_auth_token.py b/nova/tests/unit/objects/test_console_auth_token.py
index 9c92e798b0..9a0901e12a 100644
--- a/nova/tests/unit/objects/test_console_auth_token.py
+++ b/nova/tests/unit/objects/test_console_auth_token.py
@@ -14,7 +14,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
import urllib.parse as urlparse
from oslo_db.exception import DBDuplicateEntry
diff --git a/nova/tests/unit/objects/test_ec2.py b/nova/tests/unit/objects/test_ec2.py
index 8261fd6173..55230a7599 100644
--- a/nova/tests/unit/objects/test_ec2.py
+++ b/nova/tests/unit/objects/test_ec2.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_external_event.py b/nova/tests/unit/objects/test_external_event.py
index 915358ba59..58c45c2549 100644
--- a/nova/tests/unit/objects/test_external_event.py
+++ b/nova/tests/unit/objects/test_external_event.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import external_event as external_event_obj
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
index 39f9de8cfe..461dc0ff6f 100644
--- a/nova/tests/unit/objects/test_fields.py
+++ b/nova/tests/unit/objects/test_fields.py
@@ -15,9 +15,9 @@
import collections
import datetime
import os
+from unittest import mock
import iso8601
-import mock
from oslo_serialization import jsonutils
from oslo_versionedobjects import exception as ovo_exc
@@ -551,7 +551,7 @@ class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
- self.field = fields.Field(fields.NetworkModel())
+ self.field = fields.NetworkModelField()
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
@@ -570,7 +570,7 @@ class TestNetworkVIFModel(TestField):
super(TestNetworkVIFModel, self).setUp()
model = network_model.VIF('6c197bc7-820c-40d5-8aff-7116b993e793')
primitive = jsonutils.dumps(model)
- self.field = fields.Field(fields.NetworkVIFModel())
+ self.field = fields.NetworkVIFModelField()
self.coerce_good_values = [(model, model), (primitive, model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, primitive)]
diff --git a/nova/tests/unit/objects/test_flavor.py b/nova/tests/unit/objects/test_flavor.py
index 93294d95aa..4172d3fda3 100644
--- a/nova/tests/unit/objects/test_flavor.py
+++ b/nova/tests/unit/objects/test_flavor.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/objects/test_host_mapping.py b/nova/tests/unit/objects/test_host_mapping.py
index 8917e318af..73eadb7047 100644
--- a/nova/tests/unit/objects/test_host_mapping.py
+++ b/nova/tests/unit/objects/test_host_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_image_meta.py b/nova/tests/unit/objects/test_image_meta.py
index 6e3725de84..371f7b101a 100644
--- a/nova/tests/unit/objects/test_image_meta.py
+++ b/nova/tests/unit/objects/test_image_meta.py
@@ -108,6 +108,7 @@ class TestImageMetaProps(test.NoDBTestCase):
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
+ 'hw_locked_memory': 'true',
'trait:CUSTOM_TRUSTED': 'required',
# Fill sane values for the rest here
}
@@ -116,6 +117,7 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual('vga', virtprops.hw_video_model)
self.assertEqual(512, virtprops.hw_video_ram)
self.assertTrue(virtprops.hw_qemu_guest_agent)
+ self.assertTrue(virtprops.hw_locked_memory)
self.assertIsNotNone(virtprops.traits_required)
self.assertIn('CUSTOM_TRUSTED', virtprops.traits_required)
@@ -285,6 +287,28 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual([set([0, 1, 2, 3])],
virtprops.hw_numa_cpus)
+ def test_locked_memory_prop(self):
+ props = {'hw_locked_memory': 'true'}
+ virtprops = objects.ImageMetaProps.from_dict(props)
+ self.assertTrue(virtprops.hw_locked_memory)
+
+ def test_obj_make_compatible_hw_locked_memory(self):
+ """Check 'hw_locked_memory' compatibility."""
+ # assert that 'hw_locked_memory' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_locked_memory='true',
+ )
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertIn('hw_locked_memory',
+ primitive['nova_object.data'])
+ self.assertTrue(primitive['nova_object.data']['hw_locked_memory'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.32')
+ self.assertNotIn('hw_locked_memory',
+ primitive['nova_object.data'])
+
def test_get_unnumbered_trait_fields(self):
"""Tests that only valid un-numbered required traits are parsed from
the properties.
@@ -349,6 +373,34 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.0')
+ def test_obj_make_compatible_hw_ephemeral_encryption(self):
+ """Check 'hw_ephemeral_encryption(_format)' compatibility."""
+ # assert that 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' is supported
+ # on a suitably new version
+ new_fields = (
+ 'hw_ephemeral_encryption',
+ 'hw_ephemeral_encryption_format'
+ )
+ eph_format = objects.fields.BlockDeviceEncryptionFormatType.LUKS
+ obj = objects.ImageMetaProps(
+ hw_ephemeral_encryption='yes',
+ hw_ephemeral_encryption_format=eph_format,
+ )
+ primitive = obj.obj_to_primitive('1.32')
+ for field in new_fields:
+ self.assertIn(field, primitive['nova_object.data'])
+ self.assertTrue(
+ primitive['nova_object.data']['hw_ephemeral_encryption'])
+ self.assertEqual(
+ eph_format,
+ primitive['nova_object.data']['hw_ephemeral_encryption_format'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.31')
+ for field in new_fields:
+ self.assertNotIn(field, primitive['nova_object.data'])
+
def test_obj_make_compatible_hw_emulation(self):
"""Check 'hw_emulation_architecture' compatibility."""
# assert that 'hw_emulation_architecture' is supported
@@ -486,3 +538,19 @@ class TestImageMetaProps(test.NoDBTestCase):
hw_pci_numa_affinity_policy=fields.PCINUMAAffinityPolicy.SOCKET)
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.27')
+
+ def test_obj_make_compatible_viommu_model(self):
+ """Check 'hw_viommu_model' compatibility."""
+ # assert that 'hw_viommu_model' is supported on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_viommu_model=objects.fields.VIOMMUModel.VIRTIO,
+ )
+ primitive = obj.obj_to_primitive('1.34')
+ self.assertIn('hw_viommu_model', primitive['nova_object.data'])
+ self.assertEqual(
+ objects.fields.VIOMMUModel.VIRTIO,
+ primitive['nova_object.data']['hw_viommu_model'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertNotIn('hw_viommu_model', primitive['nova_object.data'])
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index 6004bdf63d..6215d2be60 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -14,8 +14,8 @@
import collections
import datetime
+from unittest import mock
-import mock
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
@@ -25,6 +25,7 @@ from oslo_versionedobjects import base as ovo_base
from nova.compute import task_states
from nova.compute import vm_states
+from nova import context
from nova.db.main import api as db
from nova.db.main import models as sql_models
from nova import exception
@@ -2073,3 +2074,164 @@ class TestInstanceObjectMisc(test.NoDBTestCase):
self.assertEqual(['metadata', 'system_metadata', 'info_cache',
'security_groups', 'pci_devices', 'tags', 'extra',
'extra.flavor'], result_list)
+
+
+class TestInstanceObjectGetPciDevices(test.NoDBTestCase):
+ def test_lazy_loading_pci_devices(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ inst = instance.Instance(ctxt, uuid=uuids.instance)
+ with mock.patch(
+ "nova.objects.PciDeviceList.get_by_instance_uuid",
+ return_value=objects.PciDeviceList(),
+ ) as mock_get_pci:
+ self.assertEqual([], inst.get_pci_devices())
+
+ mock_get_pci.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_lazy_loading_pci_requests(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ devs = [objects.PciDevice(request_id=uuids.req1)]
+ inst = instance.Instance(
+ ctxt,
+ uuid=uuids.instance,
+ pci_devices=objects.PciDeviceList(
+ objects=devs
+ ),
+ )
+
+ with mock.patch(
+ "nova.objects.InstancePCIRequests.get_by_instance_uuid",
+ return_value=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ ) as mock_get_pci_req:
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ mock_get_pci_req.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_no_filter(self):
+ devs = [objects.PciDevice()]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs)
+ )
+
+ self.assertEqual(devs, inst.get_pci_devices())
+
+ def test_no_filter_by_request_id(self):
+ expected_devs = [objects.PciDevice(request_id=uuids.req1)]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs)
+ )
+
+ self.assertEqual(
+ expected_devs, inst.get_pci_devices(request_id=uuids.req1)
+ )
+
+ def test_no_filter_by_source(self):
+ expected_devs = [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ def test_no_filter_by_request_id_and_source(self):
+ expected_devs = []
+ all_devs = expected_devs + [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req2),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ request_id=uuids.req1,
+ source=objects.InstancePCIRequest.NEUTRON_PORT,
+ ),
+ )
+
+ def test_old_pci_dev_and_req(self):
+ """This tests the case when the system has old InstancePCIRequest
+ objects without the request_id being filled. And therefore have
+ PciDevice object where the request_id is None too. These requests and
+ devices are always flavor based.
+ """
+ devs = [
+ objects.PciDevice(request_id=None),
+ objects.PciDevice(request_id=None),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=None,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS,
+ ),
+ )
diff --git a/nova/tests/unit/objects/test_instance_action.py b/nova/tests/unit/objects/test_instance_action.py
index 1743623b1c..8322102021 100644
--- a/nova/tests/unit/objects/test_instance_action.py
+++ b/nova/tests/unit/objects/test_instance_action.py
@@ -14,8 +14,8 @@
import copy
import traceback
+from unittest import mock
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_instance_device_metadata.py b/nova/tests/unit/objects/test_instance_device_metadata.py
index 6f998db84e..c04d02dcb7 100644
--- a/nova/tests/unit/objects/test_instance_device_metadata.py
+++ b/nova/tests/unit/objects/test_instance_device_metadata.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from nova import objects
diff --git a/nova/tests/unit/objects/test_instance_fault.py b/nova/tests/unit/objects/test_instance_fault.py
index b19d8663c1..1816801fca 100644
--- a/nova/tests/unit/objects/test_instance_fault.py
+++ b/nova/tests/unit/objects/test_instance_fault.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
index 90efb62902..5ea566fea7 100644
--- a/nova/tests/unit/objects/test_instance_group.py
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_instance_info_cache.py b/nova/tests/unit/objects/test_instance_info_cache.py
index 13c1082ffc..2c4d6a3263 100644
--- a/nova/tests/unit/objects/test_instance_info_cache.py
+++ b/nova/tests/unit/objects/test_instance_info_cache.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_instance_mapping.py b/nova/tests/unit/objects/test_instance_mapping.py
index 2c877c0a1f..865f5b6581 100644
--- a/nova/tests/unit/objects/test_instance_mapping.py
+++ b/nova/tests/unit/objects/test_instance_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as orm_exc
diff --git a/nova/tests/unit/objects/test_instance_numa.py b/nova/tests/unit/objects/test_instance_numa.py
index f7a9ef7a1d..0d3bd0dba0 100644
--- a/nova/tests/unit/objects/test_instance_numa.py
+++ b/nova/tests/unit/objects/test_instance_numa.py
@@ -11,7 +11,8 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
import testtools
diff --git a/nova/tests/unit/objects/test_instance_pci_requests.py b/nova/tests/unit/objects/test_instance_pci_requests.py
index 3f21b26010..91b289dbd5 100644
--- a/nova/tests/unit/objects/test_instance_pci_requests.py
+++ b/nova/tests/unit/objects/test_instance_pci_requests.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
index ad405b7e1b..b86bbb44de 100644
--- a/nova/tests/unit/objects/test_keypair.py
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
from nova import exception
diff --git a/nova/tests/unit/objects/test_migrate_data.py b/nova/tests/unit/objects/test_migrate_data.py
index 7f587c6906..7ceaf2a192 100644
--- a/nova/tests/unit/objects/test_migrate_data.py
+++ b/nova/tests/unit/objects/test_migrate_data.py
@@ -219,67 +219,6 @@ class TestRemoteHyperVLiveMigrateData(test_objects._RemoteTest,
pass
-class _TestPowerVMLiveMigrateData(object):
- @staticmethod
- def _mk_obj():
- return migrate_data.PowerVMLiveMigrateData(
- host_mig_data=dict(one=2),
- dest_ip='1.2.3.4',
- dest_user_id='a_user',
- dest_sys_name='a_sys',
- public_key='a_key',
- dest_proc_compat='POWER7',
- vol_data=dict(three=4),
- vea_vlan_mappings=dict(five=6),
- old_vol_attachment_ids=dict(seven=8),
- wait_for_vif_plugged=True)
-
- @staticmethod
- def _mk_leg():
- return {
- 'host_mig_data': {'one': '2'},
- 'dest_ip': '1.2.3.4',
- 'dest_user_id': 'a_user',
- 'dest_sys_name': 'a_sys',
- 'public_key': 'a_key',
- 'dest_proc_compat': 'POWER7',
- 'vol_data': {'three': '4'},
- 'vea_vlan_mappings': {'five': '6'},
- 'old_vol_attachment_ids': {'seven': '8'},
- 'wait_for_vif_plugged': True
- }
-
- def test_migrate_data(self):
- obj = self._mk_obj()
- self.assertEqual('a_key', obj.public_key)
- obj.public_key = 'key2'
- self.assertEqual('key2', obj.public_key)
-
- def test_obj_make_compatible(self):
- obj = self._mk_obj()
-
- data = lambda x: x['nova_object.data']
-
- primitive = data(obj.obj_to_primitive())
- self.assertIn('vea_vlan_mappings', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.0'))
- self.assertNotIn('vea_vlan_mappings', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.1'))
- self.assertNotIn('old_vol_attachment_ids', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.2'))
- self.assertNotIn('wait_for_vif_plugged', primitive)
-
-
-class TestPowerVMLiveMigrateData(test_objects._LocalTest,
- _TestPowerVMLiveMigrateData):
- pass
-
-
-class TestRemotePowerVMLiveMigrateData(test_objects._RemoteTest,
- _TestPowerVMLiveMigrateData):
- pass
-
-
class TestVIFMigrateData(test.NoDBTestCase):
def test_get_dest_vif_source_vif_not_set(self):
diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py
index 970122a409..6da232b933 100644
--- a/nova/tests/unit/objects/test_migration.py
+++ b/nova/tests/unit/objects/test_migration.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_migration_context.py b/nova/tests/unit/objects/test_migration_context.py
index 94e8e9d57f..12becaee38 100644
--- a/nova/tests/unit/objects/test_migration_context.py
+++ b/nova/tests/unit/objects/test_migration_context.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 6f162dc3bb..aab079381c 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -19,9 +19,9 @@ import datetime
import inspect
import os
import pprint
+from unittest import mock
import fixtures
-import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
@@ -1046,7 +1046,7 @@ class TestRegistry(test.NoDBTestCase):
object_data = {
'Aggregate': '1.3-f315cb68906307ca2d1cca84d4753585',
'AggregateList': '1.3-3ea55a050354e72ef3306adefa553957',
- 'BlockDeviceMapping': '1.20-45a6ad666ddf14bbbedece2293af77e2',
+ 'BlockDeviceMapping': '1.21-220abb8aa1450e759b72fce8ec6ff955',
'BlockDeviceMappingList': '1.18-73bcbbae5ef5e8adcedbc821db869306',
'BuildRequest': '1.3-077dee42bed93f8a5b62be77657b7152',
'BuildRequestList': '1.0-cd95608eccb89fbc702c8b52f38ec738',
@@ -1066,20 +1066,20 @@ object_data = {
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'Flavor': '1.2-4ce99b41327bb230262e5a8f45ff0ce3',
'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
- 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HostMappingList': '1.1-18ac2bfb8c1eb5545bed856da58a79bc',
+ 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HyperVLiveMigrateData': '1.4-e265780e6acfa631476c8170e8d6fce0',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.31-27337af769b0c85b4ba4be8aebc1a65d',
+ 'ImageMetaProps': '1.34-29b3a6b7fe703f36bfd240d914f16c21',
'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759',
'InstanceDeviceMetadata': '1.0-74d78dd36aa32d26d2769a1b57caf186',
- 'InstanceExternalEvent': '1.4-06c2dfcf2d2813c24cd37ee728524f1a',
+ 'InstanceExternalEvent': '1.5-1ec57351a9851c1eb43ccd90662d6dd0',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
@@ -1097,27 +1097,27 @@ object_data = {
'LibvirtLiveMigrateBDMInfo': '1.1-5f4a68873560b6f834b74e7861d71aaf',
'LibvirtLiveMigrateData': '1.10-348cf70ea44d3b985f45f64725d6f6a7',
'LibvirtLiveMigrateNUMAInfo': '1.0-0e777677f3459d0ed1634eabbdb6c22f',
+ 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
'MemoryDiagnostics': '1.0-2c995ae0f2223bb0f8e523c5cc0b83da',
'Migration': '1.7-bd45b232fd7c95cd79ae9187e10ef582',
'MigrationContext': '1.2-89f10a83999f852a489962ae37d8a026',
'MigrationList': '1.5-36793f8d65bae421bd5564d09a4de7be',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
- 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
- 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
- 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'NetworkInterfaceMetadata': '1.2-6f3d480b40fe339067b1c0dd4d656716',
'NetworkMetadata': '1.0-2cb8d21b34f87b0261d3e1d1ae5cf218',
'NetworkRequest': '1.3-3a815ea3df7defa61e0b894dee5288ba',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NicDiagnostics': '1.0-895e9ad50e0f56d5258585e3e066aea5',
- 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
+ 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
+ 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
+ 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
+ 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'PciDevice': '1.7-680e4c590aae154958ccf9677774413b',
+ 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'PowerVMLiveMigrateData': '1.4-a745f4eda16b45e1bc5686a0c498f27e',
'Quotas': '1.3-3b2b91371f60e788035778fc5f87797d',
'QuotasNoOp': '1.3-d1593cf969c81846bc8192255ea95cce',
'RequestGroup': '1.3-0458d350a8ec9d0673f9be5640a990ce',
@@ -1127,9 +1127,9 @@ object_data = {
'ResourceList': '1.0-4a53826625cc280e15fae64a575e0879',
'ResourceMetadata': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
- 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
+ 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SecurityGroup': '1.2-86d67d8d3ab0c971e1dc86e02f9524a8',
'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71',
'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264',
@@ -1142,16 +1142,14 @@ object_data = {
'TrustedCerts': '1.0-dcf528851e0f868c77ee47e90563cda7',
'USBDeviceBus': '1.0-e4c7dd6032e46cd74b027df5eb2d4750',
'VIFMigrateData': '1.0-cb15282b25a039ab35046ed705eb931d',
- 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VirtCPUFeature': '1.0-ea2464bdd09084bd388e5f61d5d4fc86',
'VirtCPUModel': '1.0-5e1864af9227f698326203d7249796b5',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.3-efd3ca8ebcc5ce65fff5a25f31754c54',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
+ 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenDeviceBus': '1.0-272a4f899b24e31e42b2b9a7ed7e9194',
- # TODO(efried): re-alphabetize this
- 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
}
diff --git a/nova/tests/unit/objects/test_pci_device.py b/nova/tests/unit/objects/test_pci_device.py
index 91ec566c32..1e971c5a21 100644
--- a/nova/tests/unit/objects/test_pci_device.py
+++ b/nova/tests/unit/objects/test_pci_device.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_quotas.py b/nova/tests/unit/objects/test_quotas.py
index 154c9f278a..15da48f1c4 100644
--- a/nova/tests/unit/objects/test_quotas.py
+++ b/nova/tests/unit/objects/test_quotas.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova.db.main import api as db_api
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 9c23fd4983..d1bb59868f 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
-import mock
+import fixtures
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -430,6 +431,67 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
+ # TODO(gibi): replace this with setting the config
+ # [scheduler]pci_in_placement=True once that flag is available
+ @mock.patch(
+ 'nova.objects.request_spec.RequestSpec._pci_in_placement_enabled',
+ new=mock.Mock(return_value=True),
+ )
+ def test_from_components_flavor_based_pci_requests(self):
+ ctxt = context.RequestContext(
+ fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
+ )
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {
+ "id": uuids.image_id,
+ "properties": {"mappings": []},
+ "status": "fake-status",
+ "location": "far-away",
+ }
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {"fake": "property"}
+
+ qos_port_rg = request_spec.RequestGroup()
+ req_lvl_params = request_spec.RequestLevelParams()
+
+ pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "1234", "product_id": "fe12"}],
+ )
+ ]
+ )
+ pci_request_group = request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_1234_FE12": 1},
+ same_provider=True,
+ )
+
+ spec = objects.RequestSpec.from_components(
+ ctxt,
+ instance.uuid,
+ image,
+ flavor,
+ instance.numa_topology,
+ pci_requests,
+ filter_properties,
+ None,
+ instance.availability_zone,
+ port_resource_requests=[qos_port_rg],
+ request_level_params=req_lvl_params,
+ )
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(qos_port_rg, spec.requested_resources[0])
+ self.assertEqual(
+ pci_request_group.obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+ self.assertEqual(req_lvl_params, spec.request_level_params)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
@@ -1054,6 +1116,191 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
pass
+class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ # TODO(gibi): replace this with setting the config
+ # [scheduler]pci_in_placement=True once that flag is available
+ self.mock_pci_in_placement_enabled = self.useFixture(
+ fixtures.MockPatch(
+ "nova.objects.request_spec.RequestSpec."
+ "_pci_in_placement_enabled",
+ return_value=True,
+ )
+ ).mock
+
+ def test_pci_reqs_ignored_if_disabled(self):
+ self.mock_pci_in_placement_enabled.return_value = False
+
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec._generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_neutron_based_requests_are_ignored(self):
+ pci_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[],
+ )
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(requests=[pci_req]),
+ )
+ self.assertEqual(
+ objects.InstancePCIRequest.NEUTRON_PORT, pci_req.source
+ )
+
+ spec._generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_rc_from_product_and_vendor(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[{"vendor_id": "fff", "product_id": "dead"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec._generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_multi_device_split_to_multiple_groups(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=2,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec._generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-1",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_with_rc_and_traits_from_the_pci_req_spec(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "de12",
+ "product_id": "1234",
+ "resource_class": "gpu",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[
+ {
+ "vendor_id": "fff",
+ "product_id": "dead",
+ "traits": "foo,bar,CUSTOM_BLUE",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec._generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_GPU": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ # Note that sets would be serialized to tuples by obj_to_primitive in
+ # random order, so we need to match this spec field by field
+ expected = request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ required_traits={"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"},
+ use_same_provider=True,
+ )
+ actual = spec.requested_resources[1]
+ for field in request_spec.RequestGroup.fields.keys():
+ self.assertEqual(getattr(expected, field), getattr(actual, field))
+
+
class TestRequestGroupObject(test.NoDBTestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
diff --git a/nova/tests/unit/objects/test_resource.py b/nova/tests/unit/objects/test_resource.py
index 3ac12eee84..0e43df185b 100644
--- a/nova/tests/unit/objects/test_resource.py
+++ b/nova/tests/unit/objects/test_resource.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_security_group.py b/nova/tests/unit/objects/test_security_group.py
index 7d6a3773c5..527e5d84d6 100644
--- a/nova/tests/unit/objects/test_security_group.py
+++ b/nova/tests/unit/objects/test_security_group.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import fixture as ovo_fixture
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
index 84cbd4bf6a..60ab806207 100644
--- a/nova/tests/unit/objects/test_service.py
+++ b/nova/tests/unit/objects/test_service.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
diff --git a/nova/tests/unit/objects/test_tag.py b/nova/tests/unit/objects/test_tag.py
index 29579b1e78..caf039152d 100644
--- a/nova/tests/unit/objects/test_tag.py
+++ b/nova/tests/unit/objects/test_tag.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import tag
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_task_log.py b/nova/tests/unit/objects/test_task_log.py
index 6d93ebab4c..2ac7971c28 100644
--- a/nova/tests/unit/objects/test_task_log.py
+++ b/nova/tests/unit/objects/test_task_log.py
@@ -11,9 +11,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_utils import timeutils
from nova import objects
diff --git a/nova/tests/unit/objects/test_trusted_certs.py b/nova/tests/unit/objects/test_trusted_certs.py
index 3010dd6b5c..9029845ef3 100644
--- a/nova/tests/unit/objects/test_trusted_certs.py
+++ b/nova/tests/unit/objects/test_trusted_certs.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import trusted_certs
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
index a9049bac88..a806668c6b 100644
--- a/nova/tests/unit/objects/test_virtual_interface.py
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_volume_usage.py b/nova/tests/unit/objects/test_volume_usage.py
index a465955ad6..d8df53d5c7 100644
--- a/nova/tests/unit/objects/test_volume_usage.py
+++ b/nova/tests/unit/objects/test_volume_usage.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/pci/fakes.py b/nova/tests/unit/pci/fakes.py
index 93ab33b27f..e0267ff087 100644
--- a/nova/tests/unit/pci/fakes.py
+++ b/nova/tests/unit/pci/fakes.py
@@ -14,8 +14,8 @@
# under the License.
import functools
+from unittest import mock
-import mock
from nova.pci import whitelist
diff --git a/nova/tests/unit/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
index 1b7af10316..4f747e7b7d 100644
--- a/nova/tests/unit/pci/test_devspec.py
+++ b/nova/tests/unit/pci/test_devspec.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-import mock
+from unittest import mock
from nova import exception
from nova import objects
@@ -52,7 +51,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
for component in invalid_val_addr:
address = dict(self.pci_addr)
address[component] = str(invalid_val_addr[component])
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_dict_missing_values(self):
@@ -76,7 +75,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_string_missing_values(self):
@@ -122,7 +121,7 @@ class PciAddressGlobSpecTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciAddressGlobSpec, address)
def test_match(self):
@@ -208,18 +207,18 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_address_invalid_character(self):
pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ("Invalid PCI devices Whitelist config: property func ('12:6') "
+ msg = ("Invalid [pci]device_spec config: property func ('12:6') "
"does not parse as a hex number.")
self.assertEqual(msg, str(exc))
def test_max_func(self):
pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property func (%x) is '
+ msg = ('Invalid [pci]device_spec config: property func (%x) is '
'greater than the maximum allowable value (%x).'
% (devspec.MAX_FUNC + 1, devspec.MAX_FUNC))
self.assertEqual(msg, str(exc))
@@ -227,9 +226,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_domain(self):
pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property domain (%X) '
+ msg = ('Invalid [pci]device_spec config: property domain (%X) '
'is greater than the maximum allowable value (%X).'
% (devspec.MAX_DOMAIN + 1, devspec.MAX_DOMAIN))
self.assertEqual(msg, str(exc))
@@ -237,9 +236,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_bus(self):
pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property bus (%X) is '
+ msg = ('Invalid [pci]device_spec config: property bus (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_BUS + 1, devspec.MAX_BUS))
self.assertEqual(msg, str(exc))
@@ -247,9 +246,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_slot(self):
pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property slot (%X) is '
+ msg = ('Invalid [pci]device_spec config: property slot (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_SLOT + 1, devspec.MAX_SLOT))
self.assertEqual(msg, str(exc))
@@ -383,10 +382,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_vendor_id_out_of_range(self):
pci_info = {"vendor_id": "80860", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property vendor_id (80860) "
+ "Invalid [pci]device_spec config: property vendor_id (80860) "
"is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -399,10 +398,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_product_id_out_of_range(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "50570", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property product_id "
+ "Invalid [pci]device_spec config: property product_id "
"(50570) is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -554,21 +553,21 @@ class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
"product_id": "5050", "physical_network": "hr_net",
PCI_REMOTE_MANAGED_TAG: "true"}
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
# VF device ID mismatch.
pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
"product_id": "5050", "physical_network": "hr_net",
PCI_REMOTE_MANAGED_TAG: "true"}
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
# VF vendor ID mismatch.
pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
"product_id": "5058", "physical_network": "hr_net",
PCI_REMOTE_MANAGED_TAG: "true"}
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
@mock.patch('nova.pci.utils.is_physical_function',
@@ -645,6 +644,10 @@ class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(self.test_dev))
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
def test_remote_managed_vf_match_by_pci_obj(self):
pci_info = {"vendor_id": "8086", "address": "0000:0a:00.2",
"product_id": "5057", "physical_network": "hr_net",
@@ -664,6 +667,10 @@ class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
pci_obj = objects.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
def test_remote_managed_vf_no_match_by_pci_obj(self):
pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
"product_id": "5057", "physical_network": "hr_net",
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
index 6682b3e3b2..bcd4cecb85 100644
--- a/nova/tests/unit/pci/test_manager.py
+++ b/nova/tests/unit/pci/test_manager.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -235,7 +235,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self, mock_debug):
self.flags(
group='pci',
- passthrough_whitelist=[
+ device_spec=[
'{"product_id":"2032", "vendor_id":"8086"}'])
# There are systems where 32 bit PCI domain is used. See bug 1897528
# for example. While nova (and qemu) does not support assigning such
@@ -651,8 +651,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
pci_requests = copy.deepcopy(fake_pci_requests)
pci_requests[0]['count'] = 4
pci_requests_obj = self._create_pci_requests_object(pci_requests)
- self.tracker.claim_instance(mock.sentinel.context,
- pci_requests_obj, None)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
+ mock.sentinel.context,
+ pci_requests_obj,
+ None
+ )
self.assertEqual(len(self.tracker.claims[self.inst['uuid']]), 0)
devs = self.tracker.update_pci_for_instance(None,
self.inst,
@@ -687,11 +692,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.inst.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
- claims = self.tracker.claim_instance(
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
mock.sentinel.context,
pci_requests_obj,
- self.inst.numa_topology)
- self.assertEqual([], claims)
+ self.inst.numa_topology
+ )
def test_update_pci_for_instance_deleted(self):
pci_requests_obj = self._create_pci_requests_object(fake_pci_requests)
@@ -803,7 +810,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
free_pci_device_ids = (
[dev.id for dev in self.tracker.pci_stats.get_free_devs()])
self.assertEqual(2, len(free_pci_device_ids))
- allocated_devs = manager.get_instance_pci_devs(self.inst)
+ allocated_devs = self.inst.get_pci_devices()
pci_device = allocated_devs[0]
self.assertNotIn(pci_device.id, free_pci_device_ids)
instance_uuid = self.inst['uuid']
@@ -866,24 +873,3 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.assertIsNone(self.tracker.allocations.get(instance_uuid))
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(fake_db_devs), len(free_devs))
-
-
-class PciGetInstanceDevs(test.NoDBTestCase):
-
- def test_get_devs_object(self):
- def _fake_obj_load_attr(foo, attrname):
- if attrname == 'pci_devices':
- self.load_attr_called = True
- foo.pci_devices = objects.PciDeviceList()
-
- self.stub_out(
- 'nova.objects.Instance.obj_load_attr',
- _fake_obj_load_attr)
-
- self.load_attr_called = False
- manager.get_instance_pci_devs(objects.Instance())
- self.assertTrue(self.load_attr_called)
-
- def test_get_devs_no_pci_devices(self):
- inst = objects.Instance(pci_devices=None)
- self.assertEqual([], manager.get_instance_pci_devs(inst))
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 432f03b0b2..4a3f17f6cb 100644
--- a/nova/tests/unit/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
@@ -15,7 +15,8 @@
"""Tests for PCI request."""
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -186,6 +187,21 @@ class PciRequestTestCase(test.NoDBTestCase):
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
+ def test_get_alias_from_config_valid_rc_and_traits(self):
+ fake_alias = jsonutils.dumps({
+ "name": "xxx",
+ "resource_class": "foo",
+ "traits": "bar,baz",
+ })
+ self.flags(alias=[fake_alias], group='pci')
+ aliases = request._get_alias_from_config()
+ self.assertIsNotNone(aliases)
+ self.assertIn("xxx", aliases)
+ self.assertEqual(
+ ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]),
+ aliases["xxx"],
+ )
+
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index b60986a87d..d9b5b7bca1 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -13,8 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
@@ -98,33 +101,26 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
- self._setup_pci_stats()
-
- def _setup_pci_stats(self, numa_topology=None):
- """Exists for tests that need to setup pci_stats with a specific NUMA
- topology, while still allowing tests that don't care to get the default
- "empty" one.
- """
- if not numa_topology:
- numa_topology = objects.NUMATopology()
- self.pci_stats = stats.PciDeviceStats(numa_topology)
+ self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
# The following two calls need to be made before adding the devices.
patcher = fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_devs()
def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_remove_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[1]['count'], 1)
+ self.assertEqual(self.pci_stats.pools[1]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
@@ -153,14 +149,13 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
m = self.pci_stats.to_device_pools_obj()
new_stats = stats.PciDeviceStats(objects.NUMATopology(), m)
- self.assertEqual(len(new_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
+ self.assertEqual(len(new_stats.pools), 4)
+ self.assertEqual([d['count'] for d in new_stats], [1, 1, 1, 1])
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_apply_requests(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.apply_requests(pci_requests)
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
@@ -173,16 +168,14 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def test_support_requests(self):
self.assertTrue(self.pci_stats.support_requests(pci_requests))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_failed(self):
self.assertFalse(
self.pci_stats.support_requests(pci_requests_multiple))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_numa(self):
cells = [
@@ -240,18 +233,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
def test_filter_pools_for_socket_affinity_no_socket(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(socket=None)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(socket=None)])
+
self.assertEqual(
[],
self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell()]))
def test_filter_pools_for_socket_affinity(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(id=1, socket=1)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(id=1, socket=1)])
+
pools = self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell(id=1)])
self.assertEqual(1, len(pools))
@@ -269,8 +262,11 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.assertEqual(0, len(devs))
def test_consume_requests_failed(self):
- self.assertIsNone(self.pci_stats.consume_requests(
- pci_requests_multiple))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple,
+ )
def test_consume_requests_numa(self):
cells = [
@@ -289,7 +285,12 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests, cells))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
def test_consume_requests_no_numa_info(self):
cells = [
@@ -321,11 +322,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=[vendor_id],
numa_policy=policy, count=count)
- devs = self.pci_stats.consume_requests(pci_requests, cells)
if expected is None:
- self.assertIsNone(devs)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
else:
+ devs = self.pci_stats.consume_requests(pci_requests, cells)
self.assertEqual(set(expected),
set([dev.product_id for dev in devs]))
@@ -452,9 +458,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
@mock.patch(
'nova.pci.whitelist.Whitelist._parse_white_list_from_config')
- def test_white_list_parsing(self, mock_whitelist_parse):
- white_list = '{"product_id":"0001", "vendor_id":"8086"}'
- CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ def test_device_spec_parsing(self, mock_whitelist_parse):
+ device_spec = {"product_id": "0001", "vendor_id": "8086"}
+ CONF.set_override('device_spec', jsonutils.dumps(device_spec), 'pci')
pci_stats = stats.PciDeviceStats(objects.NUMATopology())
pci_stats.add_device(self.fake_dev_2)
pci_stats.remove_device(self.fake_dev_2)
@@ -465,16 +471,34 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
- white_list = ['{"vendor_id":"1137","product_id":"0071",'
- '"address":"*:0a:00.*","physical_network":"physnet1"}',
- '{"vendor_id":"1137","product_id":"0072"}',
- '{"vendor_id":"15b3","product_id":"101e", '
- '"remote_managed": "true"}',
- '{"vendor_id":"15b3","product_id":"101c"}',
- '{"vendor_id":"15b3","product_id":"1018", '
- '"remote_managed": "false"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
- dev_filter = whitelist.Whitelist(white_list)
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "vendor_id": "1137",
+ "product_id": "0071",
+ "address": "*:0a:00.*",
+ "physical_network": "physnet1",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "1137", "product_id": "0072"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "15b3", "product_id": "101c"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "1018",
+ "remote_managed": "false",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
self.pci_stats = stats.PciDeviceStats(
objects.NUMATopology(),
dev_filter=dev_filter)
@@ -547,7 +571,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'compute_node_id': 1,
'address': '0000:0e:00.1',
'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
'status': 'available',
'request_id': None,
'dev_type': fields.PciDeviceType.SRIOV_VF,
@@ -575,35 +599,68 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.assertEqual(v, pool[k])
def _assertPools(self):
+ nr_tagged = len(self.pci_tagged_devices)
+ nr_untagged = len(self.pci_untagged_devices)
+ nr_remote = len(self.remote_managed_netdevs)
+ nr_local = len(self.locally_managed_netdevs)
+ self.assertEqual(
+ nr_tagged + nr_untagged + nr_remote + nr_local,
+ len(self.pci_stats.pools),
+ )
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # 5 pools with the second one having the tag 'physical_network'
- # and the value 'physnet1' and multiple pools for testing
- # variations of explicit/implicit remote_managed tagging.
- self.assertEqual(5, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e',
- len(self.remote_managed_netdevs),
- remote_managed='true')
- self.assertEqual(self.remote_managed_netdevs,
- self.pci_stats.pools[2]['devices'])
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[0]],
- self.pci_stats.pools[3]['devices'])
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 1,
- remote_managed='false')
- self.assertEqual([self.locally_managed_netdevs[1]],
- self.pci_stats.pools[4]['devices'])
+ # they are also part of the keys.
+
+ # 3 pools for the pci_untagged_devices
+ devs = []
+ j = 0
+ for i in range(j, j + nr_untagged):
+ self._assertPoolContent(self.pci_stats.pools[i], '1137', '0072', 1)
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_untagged_devices, devs)
+ j += nr_untagged
+
+    # 4 pools for the pci_tagged_devices
+ devs = []
+ for i in range(j, j + nr_tagged):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "1137",
+ "0071",
+ 1,
+ physical_network="physnet1",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_tagged_devices, devs)
+ j += nr_tagged
+
+    # one pool for the remote_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_remote):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101e",
+ 1,
+ remote_managed="true",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.remote_managed_netdevs, devs)
+ j += nr_remote
+
+    # two pools for the locally_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_local):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101c",
+ 1,
+ remote_managed="false",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.locally_managed_netdevs, devs)
+ j += nr_local
def test_add_devices(self):
self._create_pci_devices()
@@ -626,20 +683,30 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
PCI_REMOTE_MANAGED_TAG: 'False'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '15b3',
- 'product_id': '1018',
+ 'product_id': '101c',
PCI_REMOTE_MANAGED_TAG: 'False'}])]
devs = self.pci_stats.consume_requests(pci_requests)
self.assertEqual(5, len(devs))
- self.assertEqual(set(['0071', '0072', '1018', '101e', '101c']),
+ self.assertEqual(set(['0071', '0072', '101e', '101c']),
set([dev.product_id for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 0)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0072', 1)
+ self._assertPoolContent(self.pci_stats.pools[2], '1137', '0072', 1)
+
+ self._assertPoolContent(self.pci_stats.pools[3], '1137', '0071', 0,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[4], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071', 1,
physical_network='physnet1')
- self._assertPoolContent(self.pci_stats.pools[2], '15b3', '101e', 0,
+ self._assertPoolContent(self.pci_stats.pools[6], '1137', '0071', 1,
+ physical_network='physnet1')
+
+ self._assertPoolContent(self.pci_stats.pools[7], '15b3', '101e', 0,
remote_managed='true')
- self._assertPoolContent(self.pci_stats.pools[3], '15b3', '101c', 0,
+ self._assertPoolContent(self.pci_stats.pools[8], '15b3', '101c', 0,
remote_managed='false')
- self._assertPoolContent(self.pci_stats.pools[4], '15b3', '1018', 0,
+ self._assertPoolContent(self.pci_stats.pools[9], '15b3', '101c', 0,
remote_managed='false')
def test_add_device_no_devspec(self):
@@ -682,43 +749,292 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device(dev2)
self._assertPools()
- def test_update_device(self):
- # Update device type of one of the device from type-PCI to
+ def test_update_device_splits_the_pool(self):
+ # Update device type of one of the device from type-VF to
# type-PF. Verify if the existing pool is updated and a new
# pool is created with dev_type type-PF.
- self._create_pci_devices()
- dev1 = self.pci_tagged_devices.pop()
- dev1.dev_type = 'type-PF'
- self.pci_stats.update_device(dev1)
- self.assertEqual(6, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071',
- 1,
- physical_network='physnet1',
- remote_managed='false')
- self.assertEqual(dev1,
- self.pci_stats.pools[5]['devices'][0])
+ vfs = []
+ for i in range(3):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="1137",
+ product_id="0071",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(3, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+
+ dev = vfs.pop()
+ dev.dev_type = 'type-PF'
+ dev.parent_addr = None
+ self.pci_stats.update_device(dev)
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(1, self.pci_stats.pools[1]["count"])
+ self.assertEqual([dev], self.pci_stats.pools[1]["devices"])
+
+ def test_only_vfs_from_the_same_parent_are_pooled(self):
+ pf1_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ pf1_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ pf2_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0b:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0b:01.0",
+ numa_node=0
+ )
+ pf2_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(pf1_vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[1]["count"])
+ self.assertEqual(pf2_vfs, self.pci_stats.pools[1]["devices"])
+
+
+class PciDeviceStatsPlacementSupportTestCase(test.NoDBTestCase):
+
+ def test_device_spec_rc_and_traits_ignored_during_pooling(self):
+ """Assert that resource_class and traits from the device spec are not
+ used as discriminator for pool creation.
+ """
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "resource_class": "foo",
+ "address": "*:81:00.1",
+ "traits": "gold",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "resource_class": "baar",
+ "address": "*:81:00.2",
+ "traits": "silver",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ # the two device matched by different device_specs with different
+ # resource_class and traits fields
+ pci_stats.add_device(pci_dev1)
+ pci_stats.add_device(pci_dev2)
+
+ # but they are put in the same pool as all the other fields are
+ # matching
+ self.assertEqual(1, len(pci_stats.pools))
+ self.assertEqual(2, pci_stats.pools[0]["count"])
+
+ def test_filter_pools_for_spec_ignores_rc_and_traits_in_spec(self):
+ """Assert that resource_class and traits are ignored in the pci
+ request spec during matching the request to pools.
+ """
+ pci_stats = stats.PciDeviceStats(objects.NUMATopology())
+ pools = [{"vendor_id": "dead", "product_id": "beef"}]
+
+ matching_pools = pci_stats._filter_pools_for_spec(
+ pools=pools,
+ request=objects.InstancePCIRequest(
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "resource_class": "foo",
+ "traits": "blue",
+ }
+ ]
+ ),
+ )
+
+ self.assertEqual(pools, matching_pools)
+
+ def test_populate_pools_metadata_from_assigned_devices(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp1}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertEqual(uuids.rp1, pci_stats.pools[0]['rp_uuid'])
+
+ def test_populate_pools_metadata_from_assigned_devices_device_without_rp(
+ self
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertNotIn('rp_uuid', pci_stats.pools[0])
+
+ def test_populate_pools_metadata_from_assigned_devices_multiple_rp(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp2}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ self.assertRaises(
+ ValueError,
+ pci_stats.populate_pools_metadata_from_assigned_devices,
+ )
class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceVFPFStatsTestCase, self).setUp()
- white_list = ['{"vendor_id":"8086","product_id":"1528"}',
- '{"vendor_id":"8086","product_id":"1515"}',
- '{"vendor_id":"15b3","product_id":"a2d6", '
- '"remote_managed": "false"}',
- '{"vendor_id":"15b3","product_id":"101e", '
- '"remote_managed": "true"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
+ device_spec = [
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1528"}),
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1515"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "a2d6",
+ "remote_managed": "false",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group='pci')
self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528):
@@ -884,13 +1200,21 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
objects.InstancePCIRequest(count=1,
spec=[{'product_id': '1528',
'dev_type': 'type-PF'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
def test_consume_VF_and_PF_same_product_id_failed(self):
self._create_pci_devices(pf_product_id=1515)
pci_requests = [objects.InstancePCIRequest(count=9,
spec=[{'product_id': '1515'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
def test_consume_PF_not_remote_managed(self):
self._create_pci_devices()
@@ -932,8 +1256,11 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
objects.InstancePCIRequest(count=1,
spec=[{'product_id': '101e'}])]
free_devs_before = self.pci_stats.get_free_devs()
- devs = self.pci_stats.consume_requests(pci_requests)
- self.assertIsNone(devs)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
free_devs_after = self.pci_stats.get_free_devs()
self.assertEqual(free_devs_before, free_devs_after)
diff --git a/nova/tests/unit/pci/test_utils.py b/nova/tests/unit/pci/test_utils.py
index 1a8b273433..1a1f9955b9 100644
--- a/nova/tests/unit/pci/test_utils.py
+++ b/nova/tests/unit/pci/test_utils.py
@@ -16,9 +16,9 @@
import glob
import os
+from unittest import mock
import fixtures
-import mock
from nova import exception
from nova.pci import utils
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index 5ebccd9121..68a051b26c 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -134,6 +134,44 @@ class BasePolicyTest(test.TestCase):
self.system_admin_context, self.system_foo_context,
self.system_member_context, self.system_reader_context,
])
+        # A few common sets of contexts to be used in tests
+ #
+ # With scope disable and no legacy rule, any admin,
+ # project members have access. No other role in that project
+ # will have access.
+ self.project_member_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ])
+ # With scope enable and legacy rule, only project scoped admin
+ # and any role in that project will have access.
+ self.project_m_r_or_admin_with_scope_and_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context
+ ])
+ # With scope enable and no legacy rule, only project scoped admin
+ # and project members have access. No other role in that project
+ # or system scoped token will have access.
+ self.project_member_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context
+ ])
+ # With scope disable and no legacy rule, any admin,
+ # project members, and project reader have access. No other
+ # role in that project will have access.
+ self.project_reader_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context
+ ])
+ # With scope enable and no legacy rule, only project scoped admin,
+ # project members, and project reader have access. No other role
+ # in that project or system scoped token will have access.
+ self.project_reader_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context
+ ])
if self.without_deprecated_rules:
# To simulate the new world, remove deprecations by overriding
@@ -149,6 +187,10 @@ class BasePolicyTest(test.TestCase):
"role:member and project_id:%(project_id)s",
"project_reader_api":
"role:reader and project_id:%(project_id)s",
+ "project_member_or_admin":
+ "rule:project_member_api or rule:context_is_admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
})
self.policy.set_rules(self.rules_without_deprecation,
overwrite=False)
diff --git a/nova/tests/unit/policies/test_admin_actions.py b/nova/tests/unit/policies/test_admin_actions.py
index 42cb74302f..21157fd832 100644
--- a/nova/tests/unit/policies/test_admin_actions.py
+++ b/nova/tests/unit/policies/test_admin_actions.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -77,12 +78,6 @@ class AdminActionsNoLegacyNoScopePolicyTest(AdminActionsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(AdminActionsNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule and scope disable, only project admin
- # is able to perform server admin actions.
- self.project_action_authorized_contexts = [self.project_admin_context]
-
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
"""Test Admin Actions APIs policies with system scope enabled.
@@ -110,10 +105,3 @@ class AdminActionsScopeTypeNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
only project admin is able to perform admin action on their server.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(AdminActionsScopeTypeNoLegacyPolicyTest, self).setUp()
- # This is how our RBAC will looks like. With no legacy rule
- # and scope enable, only project admin is able to perform
- # server admin actions.
- self.project_action_authorized_contexts = [self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_admin_password.py b/nova/tests/unit/policies/test_admin_password.py
index cec2f27f53..01cce2950e 100644
--- a/nova/tests/unit/policies/test_admin_password.py
+++ b/nova/tests/unit/policies/test_admin_password.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -100,8 +101,8 @@ class AdminPasswordNoLegacyNoScopePolicyTest(AdminPasswordPolicyTest):
super(AdminPasswordNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to change the server password.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
@@ -118,10 +119,8 @@ class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
super(AdminPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to change password.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
@@ -138,5 +137,5 @@ class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
# With scope enable and no legacy rule only project admin/member
# will be able to change password for the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_aggregates.py b/nova/tests/unit/policies/test_aggregates.py
index dbb8a69552..6ac7b6e010 100644
--- a/nova/tests/unit/policies/test_aggregates.py
+++ b/nova/tests/unit/policies/test_aggregates.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import aggregates
@@ -34,14 +35,14 @@ class AggregatesPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform Aggregate
# Operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate_list')
def test_list_aggregate_policy(self, mock_list):
rule_name = "os_compute_api:os-aggregates:index"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -54,7 +55,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
"hosts": ["host1", "host2"]})
body = {"aggregate": {"name": "test",
"availability_zone": "nova1"}}
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.create,
self.req, body=body)
@@ -62,7 +63,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate')
def test_update_aggregate_policy(self, mock_update):
rule_name = "os_compute_api:os-aggregates:update"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 1,
body={"aggregate": {"name": "new_name"}})
@@ -70,7 +71,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.delete_aggregate')
def test_delete_aggregate_policy(self, mock_delete):
rule_name = "os_compute_api:os-aggregates:delete"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.delete,
self.req, 1)
@@ -78,7 +79,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_show_aggregate_policy(self, mock_show):
rule_name = "os_compute_api:os-aggregates:show"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 1)
@@ -86,7 +87,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
def test_set_metadata_aggregate_policy(self, mock_metadata):
rule_name = "os_compute_api:os-aggregates:set_metadata"
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller._set_metadata,
self.req, 1, body=body)
@@ -94,7 +95,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.add_host_to_aggregate')
def test_add_host_aggregate_policy(self, mock_add):
rule_name = "os_compute_api:os-aggregates:add_host"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller._add_host,
self.req, 1,
body={"add_host": {"host": "host1"}})
@@ -102,7 +103,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.AggregateAPI.remove_host_from_aggregate')
def test_remove_host_aggregate_policy(self, mock_remove):
rule_name = "os_compute_api:os-aggregates:remove_host"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller._remove_host,
self.req, 1,
@@ -117,7 +118,7 @@ class AggregatesPolicyTest(base.BasePolicyTest):
body = {'cache': [{'id': uuids.fake_id}]}
req = fakes.HTTPRequest.blank('', version='2.81')
with mock.patch('nova.conductor.api.ComputeTaskAPI.cache_images'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.images,
req, 1, body=body)
@@ -148,9 +149,10 @@ class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
super(AggregatesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to perform
- # Aggregate Operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enabled, only project-scoped admins are
+ # able to perform Aggregate Operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class AggregatesScopeTypeNoLegacyPolicyTest(AggregatesScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_assisted_volume_snapshots.py b/nova/tests/unit/policies/test_assisted_volume_snapshots.py
index 5f7d6978e5..dce62e5bcc 100644
--- a/nova/tests/unit/policies/test_assisted_volume_snapshots.py
+++ b/nova/tests/unit/policies/test_assisted_volume_snapshots.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import urllib
diff --git a/nova/tests/unit/policies/test_attach_interfaces.py b/nova/tests/unit/policies/test_attach_interfaces.py
index 8625369d61..33c531c9c7 100644
--- a/nova/tests/unit/policies/test_attach_interfaces.py
+++ b/nova/tests/unit/policies/test_attach_interfaces.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -116,22 +117,21 @@ class AttachInterfacesNoLegacyNoScopePolicyTest(AttachInterfacesPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(AttachInterfacesNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loose power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
@@ -148,12 +148,10 @@ class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
super(AttachInterfacesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With Scope enable, system users no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@@ -216,20 +214,19 @@ class AttachInterfacesScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(AttachInterfacesScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
# and reader will be able to allowed operation on server interface.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_availability_zone.py b/nova/tests/unit/policies/test_availability_zone.py
index 6814d030cc..1852f8444c 100644
--- a/nova/tests/unit/policies/test_availability_zone.py
+++ b/nova/tests/unit/policies/test_availability_zone.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import availability_zone
from nova.tests.unit.api.openstack import fakes
@@ -34,20 +34,21 @@ class AvailabilityZonePolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to get AZ with host
# information.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
+ self.project_authorized_contexts = self.all_contexts
@mock.patch('nova.objects.Instance.save')
def test_availability_zone_list_policy(self, mock_save):
rule_name = "os_compute_api:os-availability-zone:list"
- self.common_policy_auth(self.all_contexts,
+ self.common_policy_auth(self.project_authorized_contexts,
rule_name, self.controller.index,
self.req)
def test_availability_zone_detail_policy(self):
rule_name = "os_compute_api:os-availability-zone:detail"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.detail,
self.req)
@@ -79,9 +80,11 @@ class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest):
super(AvailabilityZoneScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to get
- # AZ with host information.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enable, only project-scoped admins are
+ # able to get AZ with host information.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+ self.project_authorized_contexts = self.all_project_contexts
class AZScopeTypeNoLegacyPolicyTest(AvailabilityZoneScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_baremetal_nodes.py b/nova/tests/unit/policies/test_baremetal_nodes.py
index 639c314f23..68f02087c4 100644
--- a/nova/tests/unit/policies/test_baremetal_nodes.py
+++ b/nova/tests/unit/policies/test_baremetal_nodes.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import baremetal_nodes
@@ -42,13 +43,13 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
lambda *_: FAKE_IRONIC_CLIENT)
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to get baremetal nodes.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
def test_index_nodes_policy(self):
rule_name = "os_compute_api:os-baremetal-nodes:list"
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -61,7 +62,7 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
mock_get.return_value = node
mock_port.return_value = []
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.show,
self.req, uuids.fake_id)
@@ -94,9 +95,10 @@ class BaremetalNodesScopeTypePolicyTest(BaremetalNodesPolicyTest):
super(BaremetalNodesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to get
- # baremetal nodes.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ # With scope checks enable, only project-scoped admins are
+ # able to get baremetal nodes.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class BNScopeTypeNoLegacyPolicyTest(BaremetalNodesScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_console_auth_tokens.py b/nova/tests/unit/policies/test_console_auth_tokens.py
index ba70355d17..a658816538 100644
--- a/nova/tests/unit/policies/test_console_auth_tokens.py
+++ b/nova/tests/unit/policies/test_console_auth_tokens.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
diff --git a/nova/tests/unit/policies/test_console_output.py b/nova/tests/unit/policies/test_console_output.py
index 4067987541..c1bccf1d55 100644
--- a/nova/tests/unit/policies/test_console_output.py
+++ b/nova/tests/unit/policies/test_console_output.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -72,8 +73,8 @@ class ConsoleOutputNoLegacyNoScopePolicyTest(ConsoleOutputPolicyTest):
super(ConsoleOutputNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member is able to
# get the server console.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
@@ -91,10 +92,8 @@ class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
super(ConsoleOutputScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ConsoleOutputScopeTypeNoLegacyPolicyTest(
@@ -109,5 +108,5 @@ class ConsoleOutputScopeTypeNoLegacyPolicyTest(
# With scope enable and no legacy rule, only project admin/member can
# get the server console.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_create_backup.py b/nova/tests/unit/policies/test_create_backup.py
index a0eb696b89..b54ed366df 100644
--- a/nova/tests/unit/policies/test_create_backup.py
+++ b/nova/tests/unit/policies/test_create_backup.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -80,8 +81,8 @@ class CreateBackupNoLegacyNoScopePolicyTest(CreateBackupPolicyTest):
super(CreateBackupNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to create the server backup.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
@@ -99,10 +100,8 @@ class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
super(CreateBackupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system users to create the server.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
@@ -115,5 +114,5 @@ class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
super(CreateBackupScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to create the server backup.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_deferred_delete.py b/nova/tests/unit/policies/test_deferred_delete.py
index 1eb4f365c7..08bb0213f4 100644
--- a/nova/tests/unit/policies/test_deferred_delete.py
+++ b/nova/tests/unit/policies/test_deferred_delete.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -104,16 +105,16 @@ class DeferredDeleteNoLegacyNoScopePolicyTest(DeferredDeletePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(DeferredDeleteNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member is able to force
# delete or restore server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
@@ -131,10 +132,8 @@ class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
super(DeferredDeleteScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class DeferredDeleteScopeTypeNoLegacyPolicyTest(
@@ -145,14 +144,14 @@ class DeferredDeleteScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(DeferredDeleteScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enable and no legacy rule, only project admin/member is
# able to force delete or restore server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index c9993814ff..ddc8241003 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -113,12 +114,6 @@ class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(EvacuateNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule and scope disable, only project admin
- # will be able to evacuate server.
- self.project_action_authorized_contexts = [self.project_admin_context]
-
class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
"""Test Evacuate APIs policies with system scope enabled.
@@ -145,10 +140,3 @@ class EvacuateScopeTypeNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
and no more deprecated rules which means scope + new defaults.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(EvacuateScopeTypeNoLegacyPolicyTest, self).setUp()
- # This is how our RBAC will looks like. With no legacy rule
- # and scope enable, only project admin is able to evacuate
- # server.
- self.project_action_authorized_contexts = [self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_extensions.py b/nova/tests/unit/policies/test_extensions.py
index 7865ececba..d2e3c6adde 100644
--- a/nova/tests/unit/policies/test_extensions.py
+++ b/nova/tests/unit/policies/test_extensions.py
@@ -71,6 +71,16 @@ class ExtensionsScopeTypePolicyTest(ExtensionsPolicyTest):
def setUp(self):
super(ExtensionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context]
class ExtensionsNoLegacyPolicyTest(ExtensionsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_flavor_access.py b/nova/tests/unit/policies/test_flavor_access.py
index 68c31c75f9..cfdbbd2470 100644
--- a/nova/tests/unit/policies/test_flavor_access.py
+++ b/nova/tests/unit/policies/test_flavor_access.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_access
@@ -121,12 +122,11 @@ class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
super(FlavorAccessScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Scope checks remove project users power.
+ # Scope checks remove system users' power.
self.admin_authorized_contexts = [
- self.system_admin_context]
- self.admin_index_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context]
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.all_project_contexts
class FlavorAccessScopeTypeNoLegacyPolicyTest(FlavorAccessScopeTypePolicyTest):
@@ -145,5 +145,9 @@ class FlavorAccessScopeTypeNoLegacyPolicyTest(FlavorAccessScopeTypePolicyTest):
def setUp(self):
super(FlavorAccessScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- self.admin_index_authorized_contexts = [
- self.system_admin_context]
+
+ # New defaults make this admin-only
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.admin_authorized_contexts
diff --git a/nova/tests/unit/policies/test_flavor_extra_specs.py b/nova/tests/unit/policies/test_flavor_extra_specs.py
index 7da297b6e1..f3c8cacd57 100644
--- a/nova/tests/unit/policies/test_flavor_extra_specs.py
+++ b/nova/tests/unit/policies/test_flavor_extra_specs.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
@@ -56,7 +57,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
# In the base/legacy case, all project and system contexts are
# authorized in the case of things that distinguish between
# scopes, since scope checking is disabled.
- self.all_system_authorized_contexts = (self.all_project_contexts |
+ self.all_project_authorized_contexts = (self.all_project_contexts |
self.all_system_contexts)
# In the base/legacy case, any admin is an admin.
@@ -166,7 +167,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
}
}
authorize_res, unauthorize_res = self.common_policy_auth(
- self.all_system_authorized_contexts,
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._create, req, body=body,
fatal=False)
for resp in authorize_res:
@@ -186,7 +187,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
req = fakes.HTTPRequest.blank('', version='2.61')
authorize_res, unauthorize_res = self.common_policy_auth(
- self.all_system_authorized_contexts,
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._update, req, '1',
body={'flavor': {'description': None}},
fatal=False)
@@ -210,11 +211,13 @@ class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
super(FlavorExtraSpecsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Only system users are authorized for system APIs
- self.all_system_authorized_contexts = self.all_system_contexts
+ # Only project users are authorized
+ self.reduce_set('all_project_authorized', self.all_project_contexts)
+ self.reduce_set('all_authorized', self.all_project_contexts)
- # Only system_admin can do system admin things
- self.admin_authorized_contexts = [self.system_admin_context]
+ # Only admins can do admin things
+ self.admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class FlavorExtraSpecsNoLegacyNoScopeTest(FlavorExtraSpecsPolicyTest):
@@ -234,7 +237,7 @@ class FlavorExtraSpecsNoLegacyNoScopeTest(FlavorExtraSpecsPolicyTest):
self.system_foo_context,
self.project_foo_context,
])
- self.reduce_set('all_system_authorized', everything_but_foo)
+ self.reduce_set('all_project_authorized', everything_but_foo)
self.reduce_set('all_authorized', everything_but_foo)
@@ -251,11 +254,10 @@ class FlavorExtraSpecsNoLegacyPolicyTest(FlavorExtraSpecsScopeTypePolicyTest):
# contexts. With scope checking enabled, project and system
# contexts stay separate.
self.reduce_set(
- 'all_system_authorized',
- self.all_system_contexts - set([self.system_foo_context]))
- everything_but_foo = (
- self.all_project_contexts | self.all_system_contexts) - set([
- self.system_foo_context,
+ 'all_project_authorized',
+ self.all_project_contexts - set([self.project_foo_context]))
+ everything_but_foo_and_system = (
+ self.all_contexts - set([
self.project_foo_context,
- ])
- self.reduce_set('all_authorized', everything_but_foo)
+ ]) - self.all_system_contexts)
+ self.reduce_set('all_authorized', everything_but_foo_and_system)
diff --git a/nova/tests/unit/policies/test_flavor_manage.py b/nova/tests/unit/policies/test_flavor_manage.py
index 32e25b5474..0663a689cb 100644
--- a/nova/tests/unit/policies/test_flavor_manage.py
+++ b/nova/tests/unit/policies/test_flavor_manage.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
@@ -104,10 +105,11 @@ class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest):
super(FlavorManageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope enable, only system admin is able to manage
+ # With scope enabled, only project admin is able to manage
# the flavors.
self.admin_authorized_contexts = [
- self.system_admin_context]
+ self.legacy_admin_context,
+ self.project_admin_context]
class FlavorManageScopeTypeNoLegacyPolicyTest(
diff --git a/nova/tests/unit/policies/test_floating_ip_pools.py b/nova/tests/unit/policies/test_floating_ip_pools.py
index 08f36134d5..551f482bd4 100644
--- a/nova/tests/unit/policies/test_floating_ip_pools.py
+++ b/nova/tests/unit/policies/test_floating_ip_pools.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import floating_ip_pools
from nova.tests.unit.api.openstack import fakes
@@ -32,15 +32,15 @@ class FloatingIPPoolsPolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
# Check that everyone is able to list FIP pools.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
self.other_project_member_context,
self.system_member_context, self.system_reader_context,
- self.system_foo_context]
- self.everyone_unauthorized_contexts = []
+ self.system_foo_context])
+ self.everyone_unauthorized_contexts = set([])
@mock.patch('nova.network.neutron.API.get_floating_ip_pools')
def test_floating_ip_pools_policy(self, mock_get):
@@ -66,6 +66,10 @@ class FloatingIPPoolsScopeTypePolicyTest(FloatingIPPoolsPolicyTest):
super(FloatingIPPoolsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.everyone_unauthorized_contexts = (
+ self.all_contexts - self.everyone_authorized_contexts)
+
class FloatingIPPoolsNoLegacyPolicyTest(FloatingIPPoolsScopeTypePolicyTest):
"""Test Floating IP Pools APIs policies with system scope enabled,
diff --git a/nova/tests/unit/policies/test_floating_ips.py b/nova/tests/unit/policies/test_floating_ips.py
index b170d9049b..26c721e9e9 100644
--- a/nova/tests/unit/policies/test_floating_ips.py
+++ b/nova/tests/unit/policies/test_floating_ips.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -151,24 +152,24 @@ class FloatingIPNoLegacyNoScopePolicyTest(FloatingIPPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(FloatingIPNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove FIP to server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
# With no legacy, project other roles like foo will not be able
# to operate on FIP.
self.member_authorized_contexts = [
@@ -202,10 +203,8 @@ class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
super(FloatingIPScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system users.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.member_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
@@ -227,24 +226,24 @@ class FloatingIPScopeTypeNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(FloatingIPScopeTypeNoLegacyPolicyTest, self).setUp()
# Check that system admin or owner is able to
# add/delete FIP to server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
# With no legacy and scope enabled, system users and project
# other roles like foo will not be able to operate FIP.
self.member_authorized_contexts = [
diff --git a/nova/tests/unit/policies/test_hosts.py b/nova/tests/unit/policies/test_hosts.py
index 19727f7060..e07c907cf8 100644
--- a/nova/tests/unit/policies/test_hosts.py
+++ b/nova/tests/unit/policies/test_hosts.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import hosts
from nova.policies import base as base_policy
@@ -35,14 +35,14 @@ class HostsPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform hosts
# Operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.HostAPI.service_get_all')
def test_list_hosts_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -53,34 +53,34 @@ class HostsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.HostAPI.instance_get_all_by_host')
def test_show_host_policy(self, mock_get, mock_node, mock_map, mock_set):
rule_name = policies.POLICY_NAME % 'show'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 11111)
def test_update_host_policy(self):
rule_name = policies.POLICY_NAME % 'update'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 11111, body={})
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_reboot_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'reboot'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.reboot,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_shutdown_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'shutdown'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.shutdown,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_startup_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'start'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.startup,
self.req, 11111)
@@ -113,7 +113,8 @@ class HostsScopeTypePolicyTest(HostsPolicyTest):
# With scope checks enable, only system admin is able to perform
# hosts Operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class HostsScopeTypeNoLegacyPolicyTest(HostsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_hypervisors.py b/nova/tests/unit/policies/test_hypervisors.py
index 281e32d026..dd17ebe2fe 100644
--- a/nova/tests/unit/policies/test_hypervisors.py
+++ b/nova/tests/unit/policies/test_hypervisors.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import hypervisors
from nova.policies import base as base_policy
@@ -39,51 +39,51 @@ class HypervisorsPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform hypervisors
# Operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
def test_list_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
def test_list_details_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list-detail'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.detail,
self.req)
def test_show_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'show'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.get_host_uptime')
def test_uptime_hypervisors_policy(self, mock_uptime):
rule_name = hv_policies.BASE_POLICY_NAME % 'uptime'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.uptime,
self.req, 11111)
def test_search_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'search'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.search,
self.req, 11111)
def test_servers_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'servers'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.servers,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.compute_node_statistics')
def test_statistics_hypervisors_policy(self, mock_statistics):
rule_name = hv_policies.BASE_POLICY_NAME % 'statistics'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.statistics,
self.req)
@@ -115,7 +115,8 @@ class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
# With scope checks enable, only system admin is able to perform
# hypervisors Operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class HypervisorsScopeTypeNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_instance_actions.py b/nova/tests/unit/policies/test_instance_actions.py
index 4a3e409244..1ca9a66c14 100644
--- a/nova/tests/unit/policies/test_instance_actions.py
+++ b/nova/tests/unit/policies/test_instance_actions.py
@@ -11,8 +11,9 @@
# under the License.
import copy
+from unittest import mock
+
import fixtures
-import mock
from nova.api.openstack import api_version_request
from oslo_policy import policy as oslo_policy
@@ -139,20 +140,17 @@ class InstanceActionsNoLegacyNoScopePolicyTest(InstanceActionsPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.PROJECT_ADMIN,
+ base_policy.ADMIN,
}
def setUp(self):
super(InstanceActionsNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, legacy admin loose power.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
@@ -230,10 +228,8 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
# With Scope enable, system users no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
@mock.patch('nova.objects.InstanceActionEventList.get_by_action')
@mock.patch('nova.objects.InstanceAction.get_by_request_id')
@@ -279,27 +275,25 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self.assertNotIn('details', event)
-class InstanceActionsScopeTypeNoLegacyPolicyTest(InstanceActionsPolicyTest):
+class InstanceActionsScopeTypeNoLegacyPolicyTest(
+ InstanceActionsScopeTypePolicyTest):
"""Test os-instance-actions APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.PROJECT_ADMIN,
+ base_policy.ADMIN,
}
def setUp(self):
super(InstanceActionsScopeTypeNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
# With no legacy and scope enable, only project admin, member,
# and reader will be able to get server action and only admin
# with event details.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_instance_usage_audit_log.py b/nova/tests/unit/policies/test_instance_usage_audit_log.py
index 4a73d4328a..71b0cdd2aa 100644
--- a/nova/tests/unit/policies/test_instance_usage_audit_log.py
+++ b/nova/tests/unit/policies/test_instance_usage_audit_log.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import instance_usage_audit_log as iual
from nova.policies import base as base_policy
@@ -85,7 +85,8 @@ class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
# Scope checks remove project users power.
self.admin_authorized_contexts = [
- self.system_admin_context]
+ self.legacy_admin_context,
+ self.project_admin_context]
class InstanceUsageScopeTypeNoLegacyPolicyTest(
diff --git a/nova/tests/unit/policies/test_keypairs.py b/nova/tests/unit/policies/test_keypairs.py
index d74e929ef2..ee39133b7a 100644
--- a/nova/tests/unit/policies/test_keypairs.py
+++ b/nova/tests/unit/policies/test_keypairs.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from nova.policies import keypairs as policies
from nova.api.openstack.compute import keypairs
@@ -34,7 +35,7 @@ class KeypairsPolicyTest(base.BasePolicyTest):
# Check that everyone is able to create, delete and get
# their keypairs.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
@@ -42,13 +43,13 @@ class KeypairsPolicyTest(base.BasePolicyTest):
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context,
- ]
+ ])
# Check that admin is able to create, delete and get
# other users keypairs.
- self.admin_authorized_contexts = [
+ self.admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
+ self.project_admin_context])
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_index_keypairs_policy(self, mock_get):
@@ -151,6 +152,12 @@ class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
super(KeypairsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checking, only project-scoped users are allowed
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+
class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
"""Test Keypairs APIs policies with system scope enabled,
diff --git a/nova/tests/unit/policies/test_limits.py b/nova/tests/unit/policies/test_limits.py
index e0e14fcfb2..aba647caec 100644
--- a/nova/tests/unit/policies/test_limits.py
+++ b/nova/tests/unit/policies/test_limits.py
@@ -11,8 +11,7 @@
# under the License.
import functools
-
-import mock
+from unittest import mock
from nova.api.openstack.compute import limits
import nova.conf
@@ -96,7 +95,7 @@ class LimitsNoLegacyNoScopeTest(LimitsPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.PROJECT_ADMIN}
+ base_policy.ADMIN}
def setUp(self):
super(LimitsNoLegacyNoScopeTest, self).setUp()
@@ -142,7 +141,7 @@ class LimitsScopeTypeNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.PROJECT_ADMIN}
+ base_policy.ADMIN}
def setUp(self):
super(LimitsScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_lock_server.py b/nova/tests/unit/policies/test_lock_server.py
index 84f78e43bc..31de5cff0c 100644
--- a/nova/tests/unit/policies/test_lock_server.py
+++ b/nova/tests/unit/policies/test_lock_server.py
@@ -11,9 +11,9 @@
# under the License.
import functools
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -139,11 +139,9 @@ class LockServerNoLegacyNoScopePolicyTest(LockServerPolicyTest):
def setUp(self):
super(LockServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
- # able to lock/unlock the server and only project admin can
- # override the unlock.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ # able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
@@ -160,10 +158,8 @@ class LockServerScopeTypePolicyTest(LockServerPolicyTest):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to lock/unlock the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -178,9 +174,8 @@ class LockServerScopeTypeNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
super(LockServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to lock/unlock the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
class LockServerOverridePolicyTest(LockServerScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_migrate_server.py b/nova/tests/unit/policies/test_migrate_server.py
index 8904eb01fb..0f750770d9 100644
--- a/nova/tests/unit/policies/test_migrate_server.py
+++ b/nova/tests/unit/policies/test_migrate_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -82,11 +83,6 @@ class MigrateServerNoLegacyNoScopeTest(MigrateServerPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
"""Test Migrate Server APIs policies with system scope enabled.
@@ -114,12 +110,6 @@ class MigrateServerScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerScopeTypeNoLegacyPolicyTest, self).setUp()
- # with no legacy rule and scope enable., only project admin is able to
- # migrate the server.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class MigrateServerOverridePolicyTest(
MigrateServerScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_migrations.py b/nova/tests/unit/policies/test_migrations.py
index 61b5240a95..25cd75a125 100644
--- a/nova/tests/unit/policies/test_migrations.py
+++ b/nova/tests/unit/policies/test_migrations.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import migrations
from nova.policies import migrations as migrations_policies
diff --git a/nova/tests/unit/policies/test_multinic.py b/nova/tests/unit/policies/test_multinic.py
index be04e8ba83..852ff25965 100644
--- a/nova/tests/unit/policies/test_multinic.py
+++ b/nova/tests/unit/policies/test_multinic.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -82,16 +83,16 @@ class MultinicNoLegacyNoScopePolicyTest(MultinicPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(MultinicNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove the fixed ip.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class MultinicScopeTypePolicyTest(MultinicPolicyTest):
@@ -110,10 +111,8 @@ class MultinicScopeTypePolicyTest(MultinicPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to add/remove
# the fixed ip.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
@@ -123,13 +122,13 @@ class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(MultinicScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to add/remove the fixed ip.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_networks.py b/nova/tests/unit/policies/test_networks.py
index c5578c4d34..9c3e0b735a 100644
--- a/nova/tests/unit/policies/test_networks.py
+++ b/nova/tests/unit/policies/test_networks.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import networks
@@ -72,9 +73,9 @@ class NetworksNoLegacyNoScopePolicyTest(NetworksPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(NetworksNoLegacyNoScopePolicyTest, self).setUp()
@@ -119,9 +120,9 @@ class NetworksScopeTypeNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(NetworksScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_pause_server.py b/nova/tests/unit/policies/test_pause_server.py
index 69602eac4e..86a3e616dd 100644
--- a/nova/tests/unit/policies/test_pause_server.py
+++ b/nova/tests/unit/policies/test_pause_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -108,8 +109,8 @@ class PauseServerNoLegacyNoScopePolicyTest(PauseServerPolicyTest):
super(PauseServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
@@ -126,10 +127,8 @@ class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
super(PauseServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
@@ -142,5 +141,5 @@ class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
super(PauseServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to pause/unpause the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_quota_class_sets.py b/nova/tests/unit/policies/test_quota_class_sets.py
index 98969f2fa1..09b90d5ebc 100644
--- a/nova/tests/unit/policies/test_quota_class_sets.py
+++ b/nova/tests/unit/policies/test_quota_class_sets.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import quota_classes
from nova.policies import quota_class_sets as policies
@@ -34,7 +34,7 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to get, update quota
# class.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@@ -46,7 +46,7 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
'ram': 51200, 'floating_ips': -1,
'fixed_ips': -1, 'instances': 10,
'injected_files': 5, 'cores': 20}}
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.update,
self.req, 'test_class',
@@ -55,7 +55,7 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.quota.QUOTAS.get_class_quotas')
def test_show_quota_class_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name,
self.controller.show,
self.req, 'test_class')
@@ -86,9 +86,10 @@ class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
super(QuotaClassSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope checks enable, only system admin is able to update
- # and get quota class.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+    # With scope checks enabled, only project admins are able to
+ # update and get quota class.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class QuotaClassScopeTypeNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_quota_sets.py b/nova/tests/unit/policies/test_quota_sets.py
index c0e29236b6..3ff8cd1c02 100644
--- a/nova/tests/unit/policies/test_quota_sets.py
+++ b/nova/tests/unit/policies/test_quota_sets.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import quota_sets
from nova import exception
@@ -36,27 +36,27 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
# With legacy rule all admin is able to update or revert their quota
# to default or get other project quota.
- self.project_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
+ self.project_admin_context])
# With legacy rule, everyone is able to get their own quota.
- self.project_reader_authorized_contexts = [
+ self.project_reader_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context]
+ self.other_project_reader_context])
# Everyone is able to get the default quota
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context]
+ self.other_project_reader_context])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
@mock.patch('nova.quota.QUOTAS.get_settable_quotas')
@@ -176,16 +176,13 @@ class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
super(QuotaSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # With scope enable, system users will be disallowed.
- self.project_admin_authorized_contexts = [
+ # With scope enabled, system users will be disallowed.
+ self.reduce_set('project_admin_authorized', set([
self.legacy_admin_context,
- self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context]
+ self.project_admin_context]))
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts)
+ self.everyone_authorized_contexts = self.all_project_contexts
class QuotaSetsScopeTypeNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
@@ -197,6 +194,8 @@ class QuotaSetsScopeTypeNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
def setUp(self):
super(QuotaSetsScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
+ # With scope enabled and no legacy, system and
+ # non-reader/member users are disallowed.
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts -
+ set([self.project_foo_context]))
diff --git a/nova/tests/unit/policies/test_remote_consoles.py b/nova/tests/unit/policies/test_remote_consoles.py
index 648e5594bc..a441d1c550 100644
--- a/nova/tests/unit/policies/test_remote_consoles.py
+++ b/nova/tests/unit/policies/test_remote_consoles.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova.policies import remote_consoles as rc_policies
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -78,8 +79,8 @@ class RemoteConsolesNoLegacyNoScopePolicyTest(RemoteConsolesPolicyTest):
super(RemoteConsolesNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able get server remote consoles.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
@@ -97,10 +98,8 @@ class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to get server
# remote console.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class RemoteConsolesScopeTypeNoLegacyPolicyTest(
@@ -115,5 +114,5 @@ class RemoteConsolesScopeTypeNoLegacyPolicyTest(
super(RemoteConsolesScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to get server remote console.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_rescue.py b/nova/tests/unit/policies/test_rescue.py
index 3e159bebaa..120809877c 100644
--- a/nova/tests/unit/policies/test_rescue.py
+++ b/nova/tests/unit/policies/test_rescue.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova.policies import base as base_policy
from nova.policies import rescue as rs_policies
from oslo_utils.fixture import uuidsentinel as uuids
@@ -107,16 +108,16 @@ class RescueServerNoLegacyNoScopePolicyTest(RescueServerPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(RescueServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to rescue/unrescue the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
@@ -134,10 +135,8 @@ class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to rescue/unrescue the
# server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
@@ -148,13 +147,13 @@ class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(RescueServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to rescue/unrescue the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_security_groups.py b/nova/tests/unit/policies/test_security_groups.py
index 689c71d93b..a9d2f484ba 100644
--- a/nova/tests/unit/policies/test_security_groups.py
+++ b/nova/tests/unit/policies/test_security_groups.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -103,22 +104,20 @@ class ServerSecurityGroupsNoLegacyNoScopePolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerSecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to add/remove SG to server and reader to get SG.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SecurityGroupsPolicyTest(base.BasePolicyTest):
@@ -242,19 +241,19 @@ class SecurityGroupsNoLegacyNoScopePolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
@@ -320,15 +319,10 @@ class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
super(ServerSecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system users.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context
- ]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
@@ -339,23 +333,21 @@ class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerSecurityGroupsScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to add/remove the SG to their server and reader
# will get SG of server.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
@@ -365,19 +357,19 @@ class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_server_diagnostics.py b/nova/tests/unit/policies/test_server_diagnostics.py
index 76c0b4594c..4a4b192baa 100644
--- a/nova/tests/unit/policies/test_server_diagnostics.py
+++ b/nova/tests/unit/policies/test_server_diagnostics.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -65,11 +66,6 @@ class ServerDiagnosticsNoLegacyNoScopeTest(ServerDiagnosticsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
"""Test Server Diagnostics APIs policies with system scope enabled.
@@ -97,12 +93,6 @@ class ServerDiagnosticsScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsScopeTypeNoLegacyPolicyTest, self).setUp()
- # with no legacy rule and scope enable., only project admin is able to
- # get server diagnostics.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class ServerDiagnosticsOverridePolicyTest(
ServerDiagnosticsScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_server_external_events.py b/nova/tests/unit/policies/test_server_external_events.py
index 630b3e8531..401b55325f 100644
--- a/nova/tests/unit/policies/test_server_external_events.py
+++ b/nova/tests/unit/policies/test_server_external_events.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_external_events as ev
diff --git a/nova/tests/unit/policies/test_server_groups.py b/nova/tests/unit/policies/test_server_groups.py
index d8894cb8ed..b0df7ccb89 100644
--- a/nova/tests/unit/policies/test_server_groups.py
+++ b/nova/tests/unit/policies/test_server_groups.py
@@ -11,8 +11,9 @@
# under the License.
import functools
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_groups
@@ -162,12 +163,10 @@ class ServerGroupNoLegacyNoScopePolicyTest(ServerGroupPolicyTest):
super(ServerGroupNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, only project admin, member will be able to delete
# the SG and also reader will be able to get the SG.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
-
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
# Even with no legacy rule, legacy admin is allowed to create SG
# use requesting context's project_id. Same for list SG.
@@ -204,16 +203,10 @@ class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enable, it disallow system users.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
@@ -243,17 +236,16 @@ class ServerGroupScopeTypeNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
def setUp(self):
super(ServerGroupScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context,
self.other_project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
diff --git a/nova/tests/unit/policies/test_server_ips.py b/nova/tests/unit/policies/test_server_ips.py
index f0ce600705..b837d2d0e2 100644
--- a/nova/tests/unit/policies/test_server_ips.py
+++ b/nova/tests/unit/policies/test_server_ips.py
@@ -84,10 +84,8 @@ class ServerIpsNoLegacyNoScopePolicyTest(ServerIpsPolicyTest):
super(ServerIpsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, only project admin, member, and reader will be able
# to get their server IP addresses.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
@@ -105,11 +103,8 @@ class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# With scope enabled, system users will not be able
# to get the server IP addresses.
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
@@ -120,9 +115,7 @@ class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
def setUp(self):
super(ServerIpsScopeTypeNoLegacyPolicyTest, self).setUp()
- # With no legacy and scope enable, only project admin, member,
+        # With no legacy and scope enabled, only admin, member,
# and reader will be able to get their server IP addresses.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_metadata.py b/nova/tests/unit/policies/test_server_metadata.py
index a915245ea8..cf4fb19e7b 100644
--- a/nova/tests/unit/policies/test_server_metadata.py
+++ b/nova/tests/unit/policies/test_server_metadata.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_metadata
@@ -118,11 +119,10 @@ class ServerMetadataNoLegacyNoScopePolicyTest(ServerMetadataPolicyTest):
def setUp(self):
super(ServerMetadataNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loose power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
@@ -139,12 +139,10 @@ class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
super(ServerMetadataScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With Scope enable, system users no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerMetadataScopeTypeNoLegacyPolicyTest(
@@ -159,8 +157,7 @@ class ServerMetadataScopeTypeNoLegacyPolicyTest(
super(ServerMetadataScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
# and reader will be able to allowed operation on server metadata.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_migrations.py b/nova/tests/unit/policies/test_server_migrations.py
index d6c249d166..b17d4ded1d 100644
--- a/nova/tests/unit/policies/test_server_migrations.py
+++ b/nova/tests/unit/policies/test_server_migrations.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+import fixtures
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_migrations
@@ -93,11 +93,6 @@ class ServerMigrationsNoLegacyNoScopeTest(ServerMigrationsPolicyTest):
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsNoLegacyNoScopeTest, self).setUp()
- self.project_admin_authorized_contexts = [
- self.project_admin_context]
-
class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
"""Test Server Migrations APIs policies with system scope enabled.
@@ -124,12 +119,6 @@ class ServerMigrationsScopeTypeNoLegacyPolicyTest(
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsScopeTypeNoLegacyPolicyTest, self).setUp()
- # Check that admin is able to perform operations
- # for server migrations.
- self.project_admin_authorized_contexts = [self.project_admin_context]
-
class ServerMigrationsOverridePolicyTest(
ServerMigrationsScopeTypeNoLegacyPolicyTest):
diff --git a/nova/tests/unit/policies/test_server_password.py b/nova/tests/unit/policies/test_server_password.py
index 613b60a30b..b163c6c562 100644
--- a/nova/tests/unit/policies/test_server_password.py
+++ b/nova/tests/unit/policies/test_server_password.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_password
@@ -79,18 +80,17 @@ class ServerPasswordNoLegacyNoScopePolicyTest(ServerPasswordPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerPasswordNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loose power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
@@ -107,12 +107,10 @@ class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
super(ServerPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With Scope enable, system users no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerPasswordScopeTypeNoLegacyPolicyTest(
@@ -123,16 +121,15 @@ class ServerPasswordScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(ServerPasswordScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
# and reader will be able to allowed operation on server password.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_tags.py b/nova/tests/unit/policies/test_server_tags.py
index 427401ddfc..412177408c 100644
--- a/nova/tests/unit/policies/test_server_tags.py
+++ b/nova/tests/unit/policies/test_server_tags.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_tags
@@ -131,11 +132,10 @@ class ServerTagsNoLegacyNoScopePolicyTest(ServerTagsPolicyTest):
def setUp(self):
super(ServerTagsNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, legacy admin loose power.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
@@ -152,12 +152,10 @@ class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
super(ServerTagsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With Scope enable, system users no longer allowed.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_reader_authorized_contexts = (
- self.project_member_authorized_contexts)
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
@@ -171,8 +169,7 @@ class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
super(ServerTagsScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
# and reader will be able to allowed operation on server tags.
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_topology.py b/nova/tests/unit/policies/test_server_topology.py
index 8624c3e7e7..e2f81dfaad 100644
--- a/nova/tests/unit/policies/test_server_topology.py
+++ b/nova/tests/unit/policies/test_server_topology.py
@@ -98,11 +98,8 @@ class ServerTopologyNoLegacyNoScopePolicyTest(ServerTopologyPolicyTest):
def setUp(self):
super(ServerTopologyNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, legacy admin loose power.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
@@ -121,10 +118,8 @@ class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
# With Scope enable, system users no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class ServerTopologyScopeTypeNoLegacyPolicyTest(
@@ -138,9 +133,6 @@ class ServerTopologyScopeTypeNoLegacyPolicyTest(
def setUp(self):
super(ServerTopologyScopeTypeNoLegacyPolicyTest, self).setUp()
# With no legacy and scope enable, only project admin, member,
- # and reader will be able to get server topology and only admin
- # with host info.
- self.project_admin_authorized_contexts = [self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
+ # and reader will be able to get server topology.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
index 3ed4bfe085..eee1e4ba51 100644
--- a/nova/tests/unit/policies/test_servers.py
+++ b/nova/tests/unit/policies/test_servers.py
@@ -11,9 +11,9 @@
# under the License.
import functools
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -1229,10 +1229,9 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API._allow_resize_to_same_host')
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(
- self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net
+ self, mock_resize, mock_save, mock_rs, mock_allow, m_net
):
# 'migrate' policy is checked before 'resize:cross_cell' so
@@ -1262,7 +1261,7 @@ class ServersPolicyTest(base.BasePolicyTest):
)
return inst
- mock_get.side_effect = fake_get
+ self.mock_get.side_effect = fake_get
def fake_validate(context, instance,
host_name, allow_cross_cell_resize):
@@ -1325,7 +1324,7 @@ class ServersNoLegacyNoScopeTest(ServersPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -1333,23 +1332,14 @@ class ServersNoLegacyNoScopeTest(ServersPolicyTest):
# Disabling legacy rule support means that we no longer allow
# random roles on our project to take action on our
- # resources. We also do not allow admin on other projects
- # (i.e. legacy_admin), nor system (because it's admin on no
- # project).
- self.reduce_set('project_action_authorized', set([
- self.project_admin_context, self.project_member_context,
- ]))
-
- self.reduce_set('project_admin_authorized', set([
- self.project_admin_context
- ]))
+ # resources. Legacy admin will have access.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
# The only additional role that can read our resources is our
# own project_reader.
self.project_reader_authorized_contexts = (
- self.project_action_authorized_contexts |
- set([self.project_reader_context])
- )
+ self.project_reader_or_admin_with_no_scope_no_legacy)
# Disabling legacy support means random roles lose power to
# see everything in their project.
@@ -1439,7 +1429,7 @@ class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.SERVERS % 'show:flavor-extra-specs':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -1449,15 +1439,8 @@ class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
# powerful on our project. Also, we drop the "any role on the
# project means you can do stuff" behavior, so project_reader
# and project_foo lose power.
- self.reduce_set('project_action_authorized', set([
- self.project_admin_context,
- self.project_member_context,
- ]))
-
- # With no legacy rule and scope checks enable, only project
- # admin can do admin things on project resource.
- self.reduce_set('project_admin_authorized',
- set([self.project_admin_context]))
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
# Only project_reader has additional read access to our
# project resources.
diff --git a/nova/tests/unit/policies/test_services.py b/nova/tests/unit/policies/test_services.py
index aae037d678..72465eb748 100644
--- a/nova/tests/unit/policies/test_services.py
+++ b/nova/tests/unit/policies/test_services.py
@@ -11,7 +11,7 @@
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import services as services_v21
from nova.tests.unit.api.openstack import fakes
@@ -35,21 +35,21 @@ class ServicesPolicyTest(base.BasePolicyTest):
# With legacy rule and scope check disabled by default, system admin,
# legacy admin, and project admin will be able to perform Services
# Operations.
- self.system_admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
def test_delete_service_policy(self):
rule_name = "os_compute_api:os-services:delete"
with mock.patch('nova.compute.api.HostAPI.service_get_by_id'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.delete,
self.req, 1)
def test_index_service_policy(self):
rule_name = "os_compute_api:os-services:list"
with mock.patch('nova.compute.api.HostAPI.service_get_all'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@@ -58,7 +58,7 @@ class ServicesPolicyTest(base.BasePolicyTest):
body = {'host': 'host1', 'binary': 'nova-compute'}
update = 'nova.compute.api.HostAPI.service_update_by_host_and_binary'
with mock.patch(update):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 'enable', body=body)
@@ -69,7 +69,7 @@ class ServicesPolicyTest(base.BasePolicyTest):
service = self.start_service(
'compute', 'fake-compute-host').service_ref
with mock.patch('nova.compute.api.HostAPI.service_update'):
- self.common_policy_auth(self.system_admin_authorized_contexts,
+ self.common_policy_auth(self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, service.uuid,
body={'status': 'enabled'})
@@ -107,7 +107,8 @@ class ServicesScopeTypePolicyTest(ServicesPolicyTest):
# With scope checks enable, only system admin is able to perform
# Service Operations.
- self.system_admin_authorized_contexts = [self.system_admin_context]
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
class ServicesScopeTypeNoLegacyPolicyTest(ServicesScopeTypePolicyTest):
diff --git a/nova/tests/unit/policies/test_shelve.py b/nova/tests/unit/policies/test_shelve.py
index 2424d78461..052f844c3d 100644
--- a/nova/tests/unit/policies/test_shelve.py
+++ b/nova/tests/unit/policies/test_shelve.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import shelve
@@ -121,9 +122,8 @@ class ShelveServerNoLegacyNoScopePolicyTest(ShelveServerPolicyTest):
# With no legacy rule, only project admin or member will be
# able to shelve/unshelve the server and only project admin can
# shelve offload the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
@@ -141,10 +141,8 @@ class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to shelve/unshelve the
# server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -159,6 +157,5 @@ class ShelveServerScopeTypeNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
super(ShelveServerScopeTypeNoLegacyPolicyTest, self).setUp()
# With scope enable and no legacy rule, only project admin/member
# will be able to shelve/unshelve the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
- self.project_admin_authorized_contexts = [self.project_admin_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_simple_tenant_usage.py b/nova/tests/unit/policies/test_simple_tenant_usage.py
index 7ae028bd5e..d6aa7af901 100644
--- a/nova/tests/unit/policies/test_simple_tenant_usage.py
+++ b/nova/tests/unit/policies/test_simple_tenant_usage.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import simple_tenant_usage
from nova.policies import simple_tenant_usage as policies
@@ -70,10 +70,8 @@ class SimpleTenantUsageNoLegacyNoScopePolicyTest(SimpleTenantUsagePolicyTest):
super(SimpleTenantUsageNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy, project other roles like foo will not be able
# to get tenant usage.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
@@ -92,11 +90,8 @@ class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
# With Scope enable, system users no longer allowed.
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
@@ -109,7 +104,5 @@ class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
def setUp(self):
super(SimpleTenantUsageScopeTypeNoLegacyPolicyTest, self).setUp()
- self.project_reader_authorized_contexts = [
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_suspend_server.py b/nova/tests/unit/policies/test_suspend_server.py
index 9ef95c61d9..7d3cde2799 100644
--- a/nova/tests/unit/policies/test_suspend_server.py
+++ b/nova/tests/unit/policies/test_suspend_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import suspend_server
@@ -106,8 +107,8 @@ class SuspendServerNoLegacyNoScopePolicyTest(SuspendServerPolicyTest):
super(SuspendServerNoLegacyNoScopePolicyTest, self).setUp()
# With no legacy rule, only project admin or member will be
# able to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
@@ -124,10 +125,8 @@ class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
super(SuspendServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Scope enable will not allow system admin to suspend/resume server.
- self.project_action_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
@@ -142,5 +141,5 @@ class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
super(SuspendServerScopeTypeNoLegacyTest, self).setUp()
# With scope enable and no legacy rule only project admin/member
# will be able to suspend/resume the server.
- self.project_action_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_tenant_networks.py b/nova/tests/unit/policies/test_tenant_networks.py
index 9359567b65..a5bc614902 100644
--- a/nova/tests/unit/policies/test_tenant_networks.py
+++ b/nova/tests/unit/policies/test_tenant_networks.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import tenant_networks
@@ -71,9 +72,9 @@ class TenantNetworksNoLegacyNoScopePolicyTest(TenantNetworksPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(TenantNetworksNoLegacyNoScopePolicyTest, self).setUp()
@@ -119,9 +120,9 @@ class TenantNetworksScopeTypeNoLegacyPolicyTest(
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
super(TenantNetworksScopeTypeNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_volumes.py b/nova/tests/unit/policies/test_volumes.py
index 2caefed3be..896881c03f 100644
--- a/nova/tests/unit/policies/test_volumes.py
+++ b/nova/tests/unit/policies/test_volumes.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -214,14 +215,12 @@ class VolumeAttachNoLegacyNoScopePolicyTest(VolumeAttachPolicyTest):
def setUp(self):
super(VolumeAttachNoLegacyNoScopePolicyTest, self).setUp()
- # With no legacy rule, only project admin, member, or reader will be
+ # With no legacy rule, only admin, member, or reader will be
# able to perform volume attachment operation on its own project.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
-
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
@@ -241,15 +240,10 @@ class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
# Scope enable will not allow system admin to perform the
# volume attachments.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
-
- self.project_reader_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context]
@@ -267,11 +261,10 @@ class VolumeAttachScopeTypeNoLegacyPolicyTest(VolumeAttachScopeTypePolicyTest):
# With scope enable and no legacy rule, it will not allow
# system users and project admin/member/reader will be able to
# perform volume attachment operation on its own project.
- self.project_reader_authorized_contexts = [
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- self.project_member_authorized_contexts = [
- self.project_admin_context, self.project_member_context]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class VolumesPolicyTest(base.BasePolicyTest):
@@ -402,25 +395,25 @@ class VolumesNoLegacyNoScopePolicyTest(VolumesPolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
v_policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
@@ -481,25 +474,25 @@ class VolumesScopeTypeNoLegacyPolicyTest(VolumesScopeTypePolicyTest):
rules_without_deprecation = {
v_policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:list':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:detail':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:delete':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:create':
- base_policy.PROJECT_MEMBER,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
v_policies.POLICY_NAME % 'snapshots:show':
- base_policy.PROJECT_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
}
def setUp(self):
diff --git a/nova/tests/unit/privsep/test_fs.py b/nova/tests/unit/privsep/test_fs.py
index 89062acce9..919b6c553d 100644
--- a/nova/tests/unit/privsep/test_fs.py
+++ b/nova/tests/unit/privsep/test_fs.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.privsep.fs
from nova import test
diff --git a/nova/tests/unit/privsep/test_idmapshift.py b/nova/tests/unit/privsep/test_idmapshift.py
index 2b5acbe33c..7c6f7833ff 100644
--- a/nova/tests/unit/privsep/test_idmapshift.py
+++ b/nova/tests/unit/privsep/test_idmapshift.py
@@ -13,9 +13,9 @@
# limitations under the License.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
import nova.privsep.idmapshift
from nova import test
diff --git a/nova/tests/unit/privsep/test_libvirt.py b/nova/tests/unit/privsep/test_libvirt.py
index 32d375bb1c..eebcf6c231 100644
--- a/nova/tests/unit/privsep/test_libvirt.py
+++ b/nova/tests/unit/privsep/test_libvirt.py
@@ -15,8 +15,9 @@
# under the License.
import binascii
+from unittest import mock
+
import ddt
-import mock
import os
import nova.privsep.libvirt
diff --git a/nova/tests/unit/privsep/test_linux_net.py b/nova/tests/unit/privsep/test_linux_net.py
index 5bdac6ca02..6b226359c3 100644
--- a/nova/tests/unit/privsep/test_linux_net.py
+++ b/nova/tests/unit/privsep/test_linux_net.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from oslo_concurrency import processutils
diff --git a/nova/tests/unit/privsep/test_path.py b/nova/tests/unit/privsep/test_path.py
index 1b4955837d..853ee01d09 100644
--- a/nova/tests/unit/privsep/test_path.py
+++ b/nova/tests/unit/privsep/test_path.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os
+from unittest import mock
+
import tempfile
from nova import exception
diff --git a/nova/tests/unit/privsep/test_qemu.py b/nova/tests/unit/privsep/test_qemu.py
index 85c48aa4ae..f3fe5599f2 100644
--- a/nova/tests/unit/privsep/test_qemu.py
+++ b/nova/tests/unit/privsep/test_qemu.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.privsep.qemu
from nova import test
diff --git a/nova/tests/unit/privsep/test_utils.py b/nova/tests/unit/privsep/test_utils.py
index 84d0767c29..887e6dfa8b 100644
--- a/nova/tests/unit/privsep/test_utils.py
+++ b/nova/tests/unit/privsep/test_utils.py
@@ -13,8 +13,8 @@
# under the License.
import errno
-import mock
import os
+from unittest import mock
import nova.privsep.utils
from nova import test
diff --git a/nova/tests/unit/scheduler/client/test_query.py b/nova/tests/unit/scheduler/client/test_query.py
index f8ea4aa337..fe23cf88e3 100644
--- a/nova/tests/unit/scheduler/client/test_query.py
+++ b/nova/tests/unit/scheduler/client/test_query.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 0650c62096..40ebac9af9 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -9,13 +9,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import copy
+import ddt
import time
+from unittest import mock
from urllib import parse
import fixtures
from keystoneauth1 import exceptions as ks_exc
-import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -41,8 +43,14 @@ class SafeConnectedTestCase(test.NoDBTestCase):
super(SafeConnectedTestCase, self).setUp()
self.context = context.get_admin_context()
- with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'):
- self.client = report.SchedulerReportClient()
+ # need to mock this globally as SchedulerReportClient._create_client
+ # is called again when EndpointNotFound is raised
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ 'keystoneauth1.loading.load_auth_from_conf_options',
+ mock.MagicMock()))
+
+ self.client = report.SchedulerReportClient()
@mock.patch('keystoneauth1.session.Session.request')
def test_missing_endpoint(self, req):
@@ -150,6 +158,60 @@ class SafeConnectedTestCase(test.NoDBTestCase):
self.assertTrue(req.called)
+@ddt.ddt
+class TestSingleton(test.NoDBTestCase):
+ def test_singleton(self):
+ # Make sure we start with a clean slate
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Make sure the first call creates the singleton, sets it
+ # globally, and returns it
+ client = report.report_client_singleton()
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure that a subsequent call returns the same thing
+ # again and that the global is unchanged
+ self.assertEqual(client, report.report_client_singleton())
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ ks_exc.DiscoveryFailure,
+ ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ test.TestingException)
+ def test_errors(self, exc):
+ self._test_error(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_error(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ self.assertRaises(exc, report.report_client_singleton)
+ mock_log.error.assert_called_once()
+
+ def test_error_then_success(self):
+ # Simulate an error
+ self._test_error(ks_exc.ConnectFailure)
+
+ # Ensure we did not set the global client
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Call again, with no error
+ client = report.report_client_singleton()
+
+ # Make sure we got a client and that it was set as the global
+ # one
+ self.assertIsNotNone(client)
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure we keep getting the same one
+ client2 = report.report_client_singleton()
+ self.assertEqual(client, client2)
+
+
class TestConstructor(test.NoDBTestCase):
def setUp(self):
super(TestConstructor, self).setUp()
diff --git a/nova/tests/unit/scheduler/filters/test_affinity_filters.py b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
index 45c4d9834c..778fbd9073 100644
--- a/nova/tests/unit/scheduler/filters/test_affinity_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
index f17a7168f1..09b8d728b2 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
index 3567d85a62..971e1a366c 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
index 6e6ae9a421..7f2f75a5bd 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
diff --git a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
index 2c1a43225e..38a75452ba 100644
--- a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import availability_zone_filter
diff --git a/nova/tests/unit/scheduler/filters/test_compute_filters.py b/nova/tests/unit/scheduler/filters/test_compute_filters.py
index d9cee4c410..335b9d07be 100644
--- a/nova/tests/unit/scheduler/filters/test_compute_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_compute_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import compute_filter
diff --git a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
index fd0dc3aca1..3b06aaf069 100644
--- a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
@@ -11,7 +11,7 @@
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import io_ops_filter
diff --git a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
index 070cc3a785..b43a9b1dc1 100644
--- a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import num_instances_filter
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
index c500b4a887..edd9735b34 100644
--- a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.pci import stats
diff --git a/nova/tests/unit/scheduler/filters/test_type_filters.py b/nova/tests/unit/scheduler/filters/test_type_filters.py
index d3f01a5c0e..c2567b5205 100644
--- a/nova/tests/unit/scheduler/filters/test_type_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_type_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import type_filter
diff --git a/nova/tests/unit/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
index cb1c3ec32b..64f4121eb0 100644
--- a/nova/tests/unit/scheduler/test_filters.py
+++ b/nova/tests/unit/scheduler/test_filters.py
@@ -16,8 +16,8 @@ Tests For Scheduler Host Filters.
"""
import inspect
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import filters
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index 5a1e665be3..c4445d5578 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -19,8 +19,8 @@ Tests For HostManager
import collections
import contextlib
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 70689f6047..4e7c0dc008 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -17,7 +17,8 @@
Tests For Scheduler
"""
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -25,6 +26,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova.scheduler import utils as scheduler_utils
@@ -395,9 +397,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, aggregates=[])
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -458,20 +467,29 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=group)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
+ instance_group=group,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ limits={},
+ cell_uuid=uuids.cell,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.manager._schedule(ctx, spec_obj,
- instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
- mock.sentinel.provider_summaries)
+ instance_uuids, None, mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -509,14 +527,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance]
@@ -582,11 +610,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
project_id=uuids.project_id,
instance_group=None)
- host_state = mock.Mock(spec=host_manager.HostState,
- host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host=mock.sentinel.host,
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+            allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = False
instance_uuids = [uuids.instance]
@@ -603,7 +636,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
- mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
+ mock_get_hosts.assert_called_once_with(spec_obj, mock.ANY, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.manager.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
@@ -634,18 +667,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, updated='fake')
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ updated="fake",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [
- all_host_states, # first instance: return all the hosts (only one)
- [], # second: act as if no more hosts that meet criteria
- all_host_states, # the final call when creating alternates
- ]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ # first instance: return all the hosts (only one)
+ if c == 0:
+ return hosts
+ # second: act as if no more hosts that meet criteria
+ elif c == 1:
+ return []
+ # the final call when creating alternates
+ elif c == 2:
+ return hosts
+ else:
+ raise StopIteration()
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
@@ -678,20 +734,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -743,20 +823,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -813,17 +917,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=ig, instance_uuid=uuids.instance0)
+ instance_group=ig,
+ instance_uuid=uuids.instance0,
+ requested_resources=[],
+ )
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
- nodename="node1", limits={}, uuid=uuids.cn1,
- cell_uuid=uuids.cell1, instances={}, aggregates=[])
- hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
- nodename="node2", limits={}, uuid=uuids.cn2,
- cell_uuid=uuids.cell2, instances={}, aggregates=[])
+ hs1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host1",
+ nodename="node1",
+ limits={},
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ hs2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host2",
+ nodename="node2",
+ limits={},
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell2,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
@@ -837,13 +960,18 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# _get_sorted_hosts() in the two iterations for each instance in
# num_instances
visited_instances = set([])
+ get_sorted_hosts_called_with_host_states = []
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
- return [hs1, hs2]
- return [hs2, hs1]
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return s
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return reversed(s)
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
@@ -870,10 +998,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
- mock.call(spec_obj, all_host_states, 0),
- mock.call(spec_obj, [hs2, hs1], 1),
+ mock.call(spec_obj, mock.ANY, 0),
+ mock.call(spec_obj, mock.ANY, 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
+ self.assertEqual(
+ all_host_states, get_sorted_hosts_called_with_host_states[0])
+ self.assertEqual(
+ [hs1], get_sorted_hosts_called_with_host_states[1])
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
@@ -1167,14 +1299,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, updated="Not None")
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ updated="Not None",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [all_host_states, []]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return []
+ else:
+ raise StopIteration
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
@@ -1203,7 +1357,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1211,14 +1365,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1269,11 +1423,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
+
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
- mock_sorted.side_effect = [all_host_states,
- list(reversed(all_host_states)),
- all_host_states]
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return list(reversed(all_host_states))
+ elif c == 2:
+ return list(hosts)
+ else:
+ raise StopIteration()
+
+ mock_sorted.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1281,14 +1448,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1322,7 +1489,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
@@ -1330,14 +1497,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1520,3 +1687,503 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager._discover_hosts_in_cells(mock.sentinel.context)
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+
+
+class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
+
+ class ACRecorderFilter(filters.BaseHostFilter):
+ """A filter that records what allocation candidates it saw on each host
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.seen_candidates = []
+
+ def host_passes(self, host_state, filter_properties):
+ # record what candidate the filter saw for each host
+ self.seen_candidates.append(list(host_state.allocation_candidates))
+ return True
+
+ class DropFirstFilter(filters.BaseHostFilter):
+ """A filter that removes one candidate and keeps the rest on each
+ host
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates.pop(0)
+ return bool(host_state.allocation_candidates)
+
+ @mock.patch.object(
+ host_manager.HostManager, '_init_instance_info', new=mock.Mock())
+ @mock.patch.object(
+ host_manager.HostManager, '_init_aggregates', new=mock.Mock())
+ def setUp(self):
+ super().setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.manager = manager.SchedulerManager()
+ self.manager.host_manager.weighers = []
+ self.request_spec = objects.RequestSpec(
+ ignore_hosts=[],
+ force_hosts=[],
+ force_nodes=[],
+ requested_resources=[],
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_see_allocation_candidates_for_each_host(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # have a single filter configured where we can assert that the filter
+        # sees the allocation_candidates of each host
+ filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [filter]
+
+ instance_uuids = [uuids.inst1]
+
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts with different candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ mock.sentinel.host1_a_c_1,
+ mock.sentinel.host1_a_c_2,
+ ]
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ mock.sentinel.host2_a_c_1,
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ mock.sentinel.allocation_request_version,
+ )
+
+        # we expect that our filter has seen the allocation candidate list of
+ # each host respectively
+ self.assertEqual(
+ [
+ alloc_reqs_by_rp_uuid[uuids.host1],
+ alloc_reqs_by_rp_uuid[uuids.host2],
+ ],
+ filter.seen_candidates,
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_scheduler_selects_filtered_a_c_from_hosts_state(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ """Assert that if a filter removes an allocation candidate from a host
+ then even if that host is selected the removed allocation candidate
+ is not used by the scheduler.
+ """
+
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+        # we have requested one instance to be scheduled so expect one set
+ # of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(
+ "host1-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consecutive_filter_sees_filtered_a_c_list(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # create two filters
+ # 1) DropFirstFilter runs first and drops the first candidate from each
+ # host
+ # 2) ACRecorderFilter runs next and records what candidates it saw
+ recorder_filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [
+ self.DropFirstFilter(),
+ recorder_filter,
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ # we expect that the second filter saw one host with one candidate and
+ # as candidate1 was already filtered out by the run of the first filter
+ self.assertEqual(
+ [["host1-candidate2"]],
+ recorder_filter.seen_candidates
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_removes_all_a_c_host_is_not_selected(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ ]
+ # second with two candidates
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ "host2-candidate1",
+ "host2-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we expect that the first host is not selected as the filter
+ # removed every candidate from the host
+ # also we expect that on the second host only candidate2 could have
+ # been selected
+ # we asked for one instance, so we expect one set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(uuids.host2, selection.compute_node_uuid)
+ self.assertEqual(
+ "host2-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consume_selected_host_sees_updated_request_spec(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # simulate that nothing is filtered out, by not having any filters
+ self.manager.host_manager.enabled_filters = []
+
+ # set up the request spec with a request group to be updated
+ # by the selected candidate
+ self.request_spec.requested_resources = [
+ objects.RequestGroup(
+ requester_id=uuids.group_req1, provider_uuids=[]
+ )
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+        # have a single host with a single allocation
+        # candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ # simulate that placement fulfilled the above RequestGroup from
+ # a certain child RP of the host.
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child_rp],
+ }
+ }
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ # make asserts on the request_spec passed to consume
+ def assert_request_spec_updated_with_selected_candidate(
+ selected_host, spec_obj, instance_uuid=None
+ ):
+            # we expect that the scheduler updated the request_spec based on
+            # the selected candidate before calling consume
+ self.assertEqual(
+ [uuids.host1_child_rp],
+ spec_obj.requested_resources[0].provider_uuids,
+ )
+
+ mock_consume.side_effect = (
+ assert_request_spec_updated_with_selected_candidate)
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ mock_consume.assert_called_once()
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_main_selection_with_claimed_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that the first (a.k.a main) selection returned for an
+ instance always maps to the allocation candidate, that was claimed by
+ the scheduler in placement.
+ """
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have one host with 3 candidates each fulfilling a request group
+ # from different child RP
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
+ }
+ } for i in [1, 2, 3]
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ self.assertEqual(uuids.host1, selection.compute_node_uuid)
+ # we expect that host1_child2 candidate is selected
+ expected_a_c = {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child2],
+ }
+ }
+ self.assertEqual(
+ expected_a_c,
+ jsonutils.loads(selection.allocation_request),
+ )
+ # and we expect that the same candidate was claimed in placement
+ mock_claim.assert_called_once_with(
+ mock.ANY,
+ self.manager.placement_client,
+ self.request_spec,
+ uuids.inst1,
+ expected_a_c,
+ allocation_request_version="fake-alloc-req-version",
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_alts_with_filtered_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that alternate generation also works based on filtered
+ candidates.
+ """
+
+ class RPFilter(filters.BaseHostFilter):
+ """A filter that only allows candidates with specific RPs"""
+
+ def __init__(self, allowed_rp_uuids):
+ self.allowed_rp_uuids = allowed_rp_uuids
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates = [
+ a_c
+ for a_c in host_state.allocation_candidates
+ if a_c["mappings"][uuids.group_req1][0]
+ in self.allowed_rp_uuids
+ ]
+ return True
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have 3 hosts each with 2 allocation candidates fulfilling a request
+ # group from a different child RP
+ hosts = []
+ for i in [1, 2, 3]:
+ host = host_manager.HostState(f"host{i}", f"node{i}", uuids.cell1)
+ host.uuid = getattr(uuids, f"host{i}")
+ alloc_reqs_by_rp_uuid[host.uuid] = [
+ {
+ "mappings": {
+ "": [host.uuid],
+ uuids.group_req1: [
+ getattr(uuids, f"host{i}_child{j}")
+ ],
+ }
+ }
+ for j in [1, 2]
+ ]
+ hosts.append(host)
+ mock_get_all_host_states.return_value = iter(hosts)
+
+ # configure a filter that only "likes" host1_child2 and host3_child2
+ # RPs. This means host2 is totally out and host1 and host3 only have
+ # one viable candidate
+ self.manager.host_manager.enabled_filters = [
+ RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
+ ]
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+        # we expect a main selection and a single alternative
+        # (host1 and host3); on both selections we expect child2 as the
+        # selected candidate
+ self.assertEqual(2, len(selections))
+ main_selection = selections[0]
+ self.assertEqual(uuids.host1, main_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host1_child2],
+ jsonutils.loads(main_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
+
+ alt_selection = selections[1]
+ self.assertEqual(uuids.host3, alt_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host3_child2],
+ jsonutils.loads(alt_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
diff --git a/nova/tests/unit/scheduler/test_request_filter.py b/nova/tests/unit/scheduler/test_request_filter.py
index 57f2f93bf6..77e538006a 100644
--- a/nova/tests/unit/scheduler/test_request_filter.py
+++ b/nova/tests/unit/scheduler/test_request_filter.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os_traits as ot
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -612,3 +612,90 @@ class TestRequestFilter(test.NoDBTestCase):
mock_get_aggs_network.assert_has_calls([
mock.call(self.context, mock.ANY, mock.ANY, uuids.net1),
mock.call(self.context, mock.ANY, mock.ANY, uuids.net2)])
+
+ def test_ephemeral_encryption_filter_no_encryption(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ # Assert that the filter returns false and doesn't update the reqspec
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_disabled(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps(
+ hw_ephemeral_encryption=False)))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'False'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_no_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'True'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION}, reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_and_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION,
+ ot.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS},
+ reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
diff --git a/nova/tests/unit/scheduler/test_rpcapi.py b/nova/tests/unit/scheduler/test_rpcapi.py
index 3c56946975..51582891aa 100644
--- a/nova/tests/unit/scheduler/test_rpcapi.py
+++ b/nova/tests/unit/scheduler/test_rpcapi.py
@@ -16,7 +16,8 @@
Unit Tests for nova.scheduler.rpcapi
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
diff --git a/nova/tests/unit/scheduler/test_utils.py b/nova/tests/unit/scheduler/test_utils.py
index 8aff5b902e..55957f3d55 100644
--- a/nova/tests/unit/scheduler/test_utils.py
+++ b/nova/tests/unit/scheduler/test_utils.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/scheduler/weights/test_weights_affinity.py b/nova/tests/unit/scheduler/weights/test_weights_affinity.py
index 10ec7e698d..3048e9f06c 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_affinity.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_affinity.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler import weights
diff --git a/nova/tests/unit/servicegroup/test_api.py b/nova/tests/unit/servicegroup/test_api.py
index b451285e4e..4ded10360a 100644
--- a/nova/tests/unit/servicegroup/test_api.py
+++ b/nova/tests/unit/servicegroup/test_api.py
@@ -15,7 +15,7 @@
"""
Test the base class for the servicegroup API
"""
-import mock
+from unittest import mock
from nova import servicegroup
from nova import test
diff --git a/nova/tests/unit/servicegroup/test_db_servicegroup.py b/nova/tests/unit/servicegroup/test_db_servicegroup.py
index 9e04451ec7..9f718e17b7 100644
--- a/nova/tests/unit/servicegroup/test_db_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_db_servicegroup.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
diff --git a/nova/tests/unit/servicegroup/test_mc_servicegroup.py b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
index 3b8399dfe3..e3896bb375 100644
--- a/nova/tests/unit/servicegroup/test_mc_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
@@ -16,7 +16,7 @@
# under the License.
import iso8601
-import mock
+from unittest import mock
from nova import servicegroup
from nova import test
diff --git a/nova/tests/unit/storage/test_rbd.py b/nova/tests/unit/storage/test_rbd.py
index 396f22c643..f89c2dee89 100644
--- a/nova/tests/unit/storage/test_rbd.py
+++ b/nova/tests/unit/storage/test_rbd.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from eventlet import tpool
-import mock
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -524,7 +524,7 @@ class RbdTestCase(test.NoDBTestCase):
self.driver.destroy_volume(vol)
# Make sure both params have the expected values
- retryctx = mock_loopingcall.call_args.args[3]
+ retryctx = mock_loopingcall.call_args[0][3]
self.assertEqual(retryctx, {'retries': 6})
loopingcall.start.assert_called_with(interval=10)
diff --git a/nova/tests/unit/test_availability_zones.py b/nova/tests/unit/test_availability_zones.py
index 438e8dba24..f2e02e39c7 100644
--- a/nova/tests/unit/test_availability_zones.py
+++ b/nova/tests/unit/test_availability_zones.py
@@ -17,7 +17,8 @@
Tests for availability zones
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from nova import availability_zones as az
diff --git a/nova/tests/unit/test_block_device.py b/nova/tests/unit/test_block_device.py
index f5a4fc5694..40020a203f 100644
--- a/nova/tests/unit/test_block_device.py
+++ b/nova/tests/unit/test_block_device.py
@@ -17,7 +17,8 @@
Tests for Block Device utility functions.
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/test_cache.py b/nova/tests/unit/test_cache.py
index b7059796f1..3f656a49b0 100644
--- a/nova/tests/unit/test_cache.py
+++ b/nova/tests/unit/test_cache.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import cache_utils
from nova import test
diff --git a/nova/tests/unit/test_cinder.py b/nova/tests/unit/test_cinder.py
index 00e79711ec..e758343549 100644
--- a/nova/tests/unit/test_cinder.py
+++ b/nova/tests/unit/test_cinder.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
from cinderclient.v3 import client as cinder_client_v3
-import mock
from requests_mock.contrib import fixture
import nova.conf
diff --git a/nova/tests/unit/test_conf.py b/nova/tests/unit/test_conf.py
index 95a7c45114..4496922e26 100644
--- a/nova/tests/unit/test_conf.py
+++ b/nova/tests/unit/test_conf.py
@@ -14,8 +14,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
import nova.conf.compute
diff --git a/nova/tests/unit/test_configdrive2.py b/nova/tests/unit/test_configdrive2.py
index 4c0ae0acb4..d04310639b 100644
--- a/nova/tests/unit/test_configdrive2.py
+++ b/nova/tests/unit/test_configdrive2.py
@@ -16,8 +16,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import fileutils
diff --git a/nova/tests/unit/test_context.py b/nova/tests/unit/test_context.py
index 940738c50d..53c8825046 100644
--- a/nova/tests/unit/test_context.py
+++ b/nova/tests/unit/test_context.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/test_crypto.py b/nova/tests/unit/test_crypto.py
index 30152b2b01..5cf92af448 100644
--- a/nova/tests/unit/test_crypto.py
+++ b/nova/tests/unit/test_crypto.py
@@ -18,11 +18,11 @@ Tests for Crypto module.
import io
import os
+from unittest import mock
from castellan.common import exception as castellan_exception
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
-import mock
from oslo_concurrency import processutils
from oslo_utils.fixture import uuidsentinel as uuids
import paramiko
diff --git a/nova/tests/unit/test_exception_wrapper.py b/nova/tests/unit/test_exception_wrapper.py
index 56eadf6952..71da124fd9 100644
--- a/nova/tests/unit/test_exception_wrapper.py
+++ b/nova/tests/unit/test_exception_wrapper.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context as nova_context
from nova import exception_wrapper
diff --git a/nova/tests/unit/test_fixtures.py b/nova/tests/unit/test_fixtures.py
index 5dbcd6d57a..8a5db79855 100644
--- a/nova/tests/unit/test_fixtures.py
+++ b/nova/tests/unit/test_fixtures.py
@@ -17,10 +17,10 @@
import copy
import datetime
import io
+from unittest import mock
import fixtures as fx
import futurist
-import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index e5a6efb0ad..10b2a79db4 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -13,8 +13,8 @@
# under the License.
import textwrap
+from unittest import mock
-import mock
import pycodestyle
from nova.hacking import checks
@@ -1030,3 +1030,16 @@ class HackingTestCase(test.NoDBTestCase):
"""
errors = [(x + 1, 0, 'N370') for x in range(4)]
self._assert_has_errors(code, checks.check_six, expected_errors=errors)
+
+ def test_import_stock_mock(self):
+ self._assert_has_errors(
+ "import mock",
+ checks.import_stock_mock, expected_errors=[(1, 0, 'N371')])
+ self._assert_has_errors(
+ "from mock import patch",
+ checks.import_stock_mock, expected_errors=[(1, 0, 'N371')])
+ code = """
+ from unittest import mock
+ import unittest.mock
+ """
+ self._assert_has_no_errors(code, checks.import_stock_mock)
diff --git a/nova/tests/unit/test_identity.py b/nova/tests/unit/test_identity.py
index 83252d8c38..2bb5e7f9c0 100644
--- a/nova/tests/unit/test_identity.py
+++ b/nova/tests/unit/test_identity.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from keystoneauth1.adapter import Adapter
from keystoneauth1 import exceptions as kse
diff --git a/nova/tests/unit/test_json_ref.py b/nova/tests/unit/test_json_ref.py
index 5a139055f5..e7cbbc9133 100644
--- a/nova/tests/unit/test_json_ref.py
+++ b/nova/tests/unit/test_json_ref.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
-import mock
+from unittest import mock
from nova import test
from nova.tests import json_ref
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
index 9d70ff252d..d013aeb651 100644
--- a/nova/tests/unit/test_metadata.py
+++ b/nova/tests/unit/test_metadata.py
@@ -22,10 +22,10 @@ import hmac
import os
import pickle
import re
+from unittest import mock
from keystoneauth1 import exceptions as ks_exceptions
from keystoneauth1 import session
-import mock
from oslo_config import cfg
from oslo_serialization import base64
from oslo_serialization import jsonutils
@@ -1458,20 +1458,17 @@ class MetadataHandlerTestCase(test.TestCase):
for c in range(ord('a'), ord('z'))]
mock_client.list_subnets.return_value = {
'subnets': subnet_list}
+ mock_client.list_ports.side_effect = fake_list_ports
- with mock.patch.object(
- mock_client, 'list_ports',
- side_effect=fake_list_ports) as mock_list_ports:
-
- response = fake_request(
- self, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Metadata-Provider': proxy_lb_id})
-
- self.assertEqual(3, mock_list_ports.call_count)
+ response = fake_request(
+ self, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Metadata-Provider': proxy_lb_id})
+
+ self.assertEqual(3, mock_client.list_ports.call_count)
self.assertEqual(200, response.status_int)
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
index 05b446fc20..062eeb7f4f 100644
--- a/nova/tests/unit/test_notifications.py
+++ b/nova/tests/unit/test_notifications.py
@@ -17,8 +17,8 @@
import copy
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/test_notifier.py b/nova/tests/unit/test_notifier.py
index 95366cdf28..fc01b1cf83 100644
--- a/nova/tests/unit/test_notifier.py
+++ b/nova/tests/unit/test_notifier.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import rpc
from nova import test
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index c78b4bfba6..871e836d87 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -16,8 +16,8 @@
"""Test of Policy Engine For Nova."""
import os.path
+from unittest import mock
-import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import requests_mock
@@ -358,6 +358,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-services:update",
"os_compute_api:os-services:delete",
"os_compute_api:os-shelve:shelve_offload",
+"os_compute_api:os-shelve:unshelve_to_host",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
@@ -553,7 +554,8 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show',
'project_admin_api', 'project_member_api',
- 'project_reader_api', 'project_reader_or_admin')
+ 'project_reader_api', 'project_member_or_admin',
+ 'project_reader_or_admin')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules +
self.allow_all_rules +
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
index edbb814ba7..7979d83e91 100644
--- a/nova/tests/unit/test_quota.py
+++ b/nova/tests/unit/test_quota.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index eece75af96..3fe56013bd 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index 04fa10d4c5..9fb6fa1c40 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -18,7 +18,8 @@
Unit Tests for remote procedure calls using queue
"""
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_service import service as _service
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index db2a2e2899..5f07515188 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
-import mock
from nova import context
from nova import service_auth
diff --git a/nova/tests/unit/test_test.py b/nova/tests/unit/test_test.py
index 8381792de6..1042153b10 100644
--- a/nova/tests/unit/test_test.py
+++ b/nova/tests/unit/test_test.py
@@ -18,9 +18,9 @@
import os.path
import tempfile
+from unittest import mock
import uuid
-import mock
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -361,21 +361,6 @@ class PatchExistsTestCase(test.NoDBTestCase):
self.assertTrue(os.path.exists(os.path.dirname(__file__)))
self.assertFalse(os.path.exists('non-existent/file'))
- @test.patch_exists('fake_file1', True)
- @test.patch_exists('fake_file2', True)
- @test.patch_exists(__file__, False)
- def test_patch_exists_multiple_decorators(self):
- """Test that @patch_exists can be used multiple times on the
- same method.
- """
- self.assertTrue(os.path.exists('fake_file1'))
- self.assertTrue(os.path.exists('fake_file2'))
- self.assertFalse(os.path.exists(__file__))
-
- # Check non-patched parameters
- self.assertTrue(os.path.exists(os.path.dirname(__file__)))
- self.assertFalse(os.path.exists('non-existent/file'))
-
class PatchOpenTestCase(test.NoDBTestCase):
fake_contents = "These file contents don't really exist"
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
index bd69ccbb65..ca4e09b087 100644
--- a/nova/tests/unit/test_utils.py
+++ b/nova/tests/unit/test_utils.py
@@ -16,13 +16,13 @@ import datetime
import os
import os.path
import tempfile
+from unittest import mock
import eventlet
import fixtures
from keystoneauth1 import adapter as ks_adapter
from keystoneauth1.identity import base as ks_identity
from keystoneauth1 import session as ks_session
-import mock
import netaddr
from openstack import exceptions as sdk_exc
from oslo_config import cfg
diff --git a/nova/tests/unit/test_weights.py b/nova/tests/unit/test_weights.py
index 5758e9aa2f..ad0a203ff4 100644
--- a/nova/tests/unit/test_weights.py
+++ b/nova/tests/unit/test_weights.py
@@ -16,7 +16,7 @@
Tests For weights.
"""
-import mock
+from unittest import mock
from nova.scheduler import weights as scheduler_weights
from nova.scheduler.weights import ram
diff --git a/nova/tests/unit/test_wsgi.py b/nova/tests/unit/test_wsgi.py
index e46318cd17..45a0406b5c 100644
--- a/nova/tests/unit/test_wsgi.py
+++ b/nova/tests/unit/test_wsgi.py
@@ -19,10 +19,10 @@
import os.path
import socket
import tempfile
+from unittest import mock
import eventlet
import eventlet.wsgi
-import mock
from oslo_config import cfg
import requests
import testtools
diff --git a/nova/tests/unit/utils.py b/nova/tests/unit/utils.py
index 6311475522..51edc45686 100644
--- a/nova/tests/unit/utils.py
+++ b/nova/tests/unit/utils.py
@@ -17,8 +17,7 @@ import errno
import platform
import socket
import sys
-
-import mock
+from unittest import mock
from nova.compute import flavors
import nova.conf
diff --git a/nova/tests/unit/virt/disk/mount/test_api.py b/nova/tests/unit/virt/disk/mount/test_api.py
index d51b2be8b0..7d8a741914 100644
--- a/nova/tests/unit/virt/disk/mount/test_api.py
+++ b/nova/tests/unit/virt/disk/mount/test_api.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_service import fixture as service_fixture
from nova import test
diff --git a/nova/tests/unit/virt/disk/mount/test_loop.py b/nova/tests/unit/virt/disk/mount/test_loop.py
index 3c0c18fa60..312b88db35 100644
--- a/nova/tests/unit/virt/disk/mount/test_loop.py
+++ b/nova/tests/unit/virt/disk/mount/test_loop.py
@@ -14,8 +14,9 @@
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import test
from nova.virt.disk.mount import loop
diff --git a/nova/tests/unit/virt/disk/mount/test_nbd.py b/nova/tests/unit/virt/disk/mount/test_nbd.py
index 0024b2f6d6..cc0e04337e 100644
--- a/nova/tests/unit/virt/disk/mount/test_nbd.py
+++ b/nova/tests/unit/virt/disk/mount/test_nbd.py
@@ -14,10 +14,10 @@
# under the License.
-import mock
import os
import tempfile
import time
+from unittest import mock
import eventlet
import fixtures
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 5b90fd186e..62005de525 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -14,8 +14,8 @@
# under the License.
import tempfile
+from unittest import mock
-import mock
from oslo_concurrency import processutils
from oslo_utils import units
diff --git a/nova/tests/unit/virt/disk/vfs/test_guestfs.py b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
index b1c619c955..9dc937202a 100644
--- a/nova/tests/unit/virt/disk/vfs/test_guestfs.py
+++ b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
import fixtures
-import mock
from nova import exception
from nova import test
diff --git a/nova/tests/unit/virt/hyperv/test_base.py b/nova/tests/unit/virt/hyperv/test_base.py
index e895fc600e..1dd7db367b 100644
--- a/nova/tests/unit/virt/hyperv/test_base.py
+++ b/nova/tests/unit/virt/hyperv/test_base.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from os_win import utilsfactory
from nova import test
diff --git a/nova/tests/unit/virt/hyperv/test_block_device_manager.py b/nova/tests/unit/virt/hyperv/test_block_device_manager.py
index ded2ffa0d4..0d914a55a5 100644
--- a/nova/tests/unit/virt/hyperv/test_block_device_manager.py
+++ b/nova/tests/unit/virt/hyperv/test_block_device_manager.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import constants as os_win_const
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py
index 07f251390e..c9ccc6e8f1 100644
--- a/nova/tests/unit/virt/hyperv/test_driver.py
+++ b/nova/tests/unit/virt/hyperv/test_driver.py
@@ -19,8 +19,8 @@ Unit tests for the Hyper-V Driver.
import platform
import sys
+from unittest import mock
-import mock
from os_win import exceptions as os_win_exc
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_eventhandler.py b/nova/tests/unit/virt/hyperv/test_eventhandler.py
index 658a49c5c1..9825bc9141 100644
--- a/nova/tests/unit/virt/hyperv/test_eventhandler.py
+++ b/nova/tests/unit/virt/hyperv/test_eventhandler.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
+from unittest import mock
from nova.tests.unit.virt.hyperv import test_base
from nova import utils
diff --git a/nova/tests/unit/virt/hyperv/test_hostops.py b/nova/tests/unit/virt/hyperv/test_hostops.py
index ebe2979f8a..04434dd37e 100644
--- a/nova/tests/unit/virt/hyperv/test_hostops.py
+++ b/nova/tests/unit/virt/hyperv/test_hostops.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
import os_resource_classes as orc
from os_win import constants as os_win_const
from oslo_config import cfg
diff --git a/nova/tests/unit/virt/hyperv/test_imagecache.py b/nova/tests/unit/virt/hyperv/test_imagecache.py
index 4c0c1318ae..827d52133d 100644
--- a/nova/tests/unit/virt/hyperv/test_imagecache.py
+++ b/nova/tests/unit/virt/hyperv/test_imagecache.py
@@ -14,10 +14,10 @@
# under the License.
import os
+from unittest import mock
import ddt
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/virt/hyperv/test_livemigrationops.py b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
index 8a3df843b9..79cb4318c5 100644
--- a/nova/tests/unit/virt/hyperv/test_livemigrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import exceptions as os_win_exc
+from unittest import mock
+
from oslo_config import cfg
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py
index 86844b11cf..d0b7ff32fd 100644
--- a/nova/tests/unit/virt/hyperv/test_migrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_migrationops.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from os_win import exceptions as os_win_exc
from oslo_utils import units
diff --git a/nova/tests/unit/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py
index 573fe557a5..7bd9e91e3f 100644
--- a/nova/tests/unit/virt/hyperv/test_pathutils.py
+++ b/nova/tests/unit/virt/hyperv/test_pathutils.py
@@ -14,8 +14,7 @@
import os
import time
-
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
index ffc1e4cd0c..5e6bf9a3c3 100644
--- a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
@@ -17,7 +17,7 @@
Unit tests for the Hyper-V RDPConsoleOps.
"""
-import mock
+from unittest import mock
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import rdpconsoleops
diff --git a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
index 4240b8eb95..e9461408c4 100644
--- a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
+++ b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
index 1e8a9c7557..4a4b7c8e4f 100644
--- a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
+++ b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_serialproxy.py b/nova/tests/unit/virt/hyperv/test_serialproxy.py
index 4d1cf80f80..b7e08a67dd 100644
--- a/nova/tests/unit/virt/hyperv/test_serialproxy.py
+++ b/nova/tests/unit/virt/hyperv/test_serialproxy.py
@@ -14,8 +14,8 @@
# under the License.
import socket
+from unittest import mock
-import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_snapshotops.py b/nova/tests/unit/virt/hyperv/test_snapshotops.py
index 60f5876296..1bb2f8dd4b 100644
--- a/nova/tests/unit/virt/hyperv/test_snapshotops.py
+++ b/nova/tests/unit/virt/hyperv/test_snapshotops.py
@@ -14,8 +14,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova.compute import task_states
from nova.tests.unit import fake_instance
diff --git a/nova/tests/unit/virt/hyperv/test_vif.py b/nova/tests/unit/virt/hyperv/test_vif.py
index c1f5951b79..d4c8d7af58 100644
--- a/nova/tests/unit/virt/hyperv/test_vif.py
+++ b/nova/tests/unit/virt/hyperv/test_vif.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.conf
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index 4065b0f7bb..07e1774f9a 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -13,10 +13,10 @@
# under the License.
import os
+from unittest import mock
import ddt
from eventlet import timeout as etimeout
-import mock
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
@@ -1374,12 +1374,10 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_get_vm_state(self):
summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}
- with mock.patch.object(self._vmops._vmutils,
- 'get_vm_summary_info') as mock_get_summary_info:
- mock_get_summary_info.return_value = summary_info
+ self._vmops._vmutils.get_vm_summary_info.return_value = summary_info
- response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
- self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
@@ -1418,12 +1416,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
- with mock.patch.object(self._vmops._vmutils,
- 'list_instance_notes') as mock_list_notes:
- mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+ self._vmops._vmutils.list_instance_notes.return_value = (
+ [('fake_name', [fake_uuid])])
- response = self._vmops.list_instance_uuids()
- mock_list_notes.assert_called_once_with()
+ response = self._vmops.list_instance_uuids()
+ self._vmops._vmutils.list_instance_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index da7262085d..66d2c2527f 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_utils import units
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
index 9c2ffe3dca..512f1438d6 100644
--- a/nova/tests/unit/virt/ironic/test_client_wrapper.py
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -13,11 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from ironicclient import client as ironic_client
from ironicclient import exc as ironic_exception
from keystoneauth1 import discover as ksa_disc
import keystoneauth1.session
-import mock
from oslo_config import cfg
import nova.conf
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 7b377b21c2..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -15,9 +15,10 @@
"""Tests for the ironic driver."""
+from unittest import mock
+
import fixtures
from ironicclient import exc as ironic_exception
-import mock
from openstack import exceptions as sdk_exc
from oslo_config import cfg
from oslo_service import loopingcall
@@ -934,6 +935,48 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected = {
'CUSTOM_IRON_NFV': {
'total': 1,
+ 'reserved': 1,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ mock_nfc.assert_called_once_with(mock.sentinel.nodename)
+ mock_nr.assert_called_once_with(mock_nfc.return_value)
+ mock_res_used.assert_called_once_with(mock_nfc.return_value)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
+ result = self.ptree.data(mock.sentinel.nodename).inventory
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_used', return_value=True)
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable', return_value=False)
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
+ def test_update_provider_tree_with_rc_occupied_workaround(self,
+ mock_nfc, mock_nr, mock_res_unavail, mock_res_used):
+ """Ensure that when a node is used, we report the inventory matching
+ the consumed resources.
+ """
+ self.flags(skip_reserve_in_use_ironic_nodes=True,
+ group="workarounds")
+ mock_nr.return_value = {
+ 'vcpus': 24,
+ 'vcpus_used': 24,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 1024,
+ 'local_gb': 100,
+ 'local_gb_used': 100,
+ 'resource_class': 'iron-nfv',
+ }
+
+ self.driver.update_provider_tree(self.ptree, mock.sentinel.nodename)
+
+ expected = {
+ 'CUSTOM_IRON_NFV': {
+ 'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
@@ -944,7 +987,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).inventory
self.assertEqual(expected, result)
@@ -1015,7 +1058,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(), result)
@@ -1047,7 +1090,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(traits), result)
@@ -2499,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2531,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2539,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
@@ -2597,9 +2658,6 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
# that the thread completes.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
- self.mock_conn = self.useFixture(
- fixtures.MockPatchObject(self.driver, '_ironic_connection')).mock
-
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
def test_rescue(self, mock_sps, mock_looping):
diff --git a/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py b/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
index cd45bac54a..28c93e4855 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
@@ -14,7 +14,8 @@
# under the License.
import binascii
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/storage/test_lvm.py b/nova/tests/unit/virt/libvirt/storage/test_lvm.py
index fbec2dcae9..04d9ffdcbf 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_lvm.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_lvm.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index 7707f745e3..5a0dbb40ce 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -14,9 +14,9 @@
# under the License.
import copy
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
@@ -74,6 +74,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
+ image = [{'device_type': 'disk', 'boot_index': 0}]
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
@@ -84,6 +85,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_path': 'fake_device'}]
return {'root_device_name': '/dev/vda',
'swap': swap if with_swap else {},
+ 'image': image,
'ephemerals': ephemerals if with_eph else [],
'block_device_mapping':
block_device_mapping if with_bdms else []}
@@ -178,11 +180,16 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
with mock.patch.object(instance_ref, 'get_flavor',
return_value=instance_ref.flavor) as get_flavor:
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Since there was no block_device_info passed to get_disk_mapping we
# expect to get the swap info from the flavor in the instance.
get_flavor.assert_called_once_with()
@@ -202,7 +209,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
- 'root_device_name': '/dev/sda'
+ 'root_device_name': '/dev/sda',
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
@@ -490,9 +498,12 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
- "lxc", "lxc",
- image_meta)
+ block_device_info = {
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "lxc", instance_ref, "lxc", "lxc", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'lxc', 'dev': None,
'type': 'disk', 'boot_index': '1'},
@@ -527,9 +538,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.flavor.swap = 5
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -549,6 +565,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.ephemeral_gb = 0
block_dev_info = {'swap': None, 'root_device_name': u'/dev/vda',
+ 'image': [],
'ephemerals': [],
'block_device_mapping': [{'boot_index': None,
'mount_device': u'/dev/vdb',
@@ -591,8 +608,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Pick the first drive letter on the bus that is available
# as the config drive. Delete the last device hardcode as
@@ -647,8 +670,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {
@@ -697,9 +726,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -718,6 +752,9 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -754,6 +791,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
block_device_info = {
'swap': {'device_name': '/dev/vdb',
'swap_size': 10},
+ 'image': [{'device_type': 'disk',
+ 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
@@ -775,6 +814,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -803,6 +843,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = {}
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': None,
'mount_device': None,
@@ -858,6 +899,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -899,6 +941,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'root_device_name': '/dev/vdf',
'swap': {'device_name': '/dev/vdy',
'swap_size': 10},
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -940,6 +983,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'swap': {'device_name': '/dev/vdb',
'device_type': 'really_lame_type',
'swap_size': 10},
+ 'image': [{'device_name': '/dev/vda',
+ 'device_type': 'disk'}],
'ephemerals': [{'disk_bus': 'no_such_bus',
'device_type': 'yeah_right',
'device_name': '/dev/vdc', 'size': 10}],
@@ -951,6 +996,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
}
expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
'device_type': 'disk', 'swap_size': 10}
+ expected_image = {'device_name': '/dev/vda', 'device_type': 'disk',
+ 'disk_bus': 'virtio'}
expected_ephemeral = {'disk_bus': 'virtio',
'device_type': 'disk',
'device_name': '/dev/vdc', 'size': 10}
@@ -970,6 +1017,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.assertFalse(get_flavor_mock.called)
self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_image, block_device_info['image'][0])
self.assertEqual(expected_ephemeral,
block_device_info['ephemerals'][0])
self.assertEqual(expected_bdm,
@@ -1124,7 +1172,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_type': 'lame_type',
'delete_on_termination': True},
{'disk_bus': 'sata', 'guest_format': None,
- 'device_name': '/dev/sda', 'size': 3}]
+ 'device_name': '/dev/sda', 'size': 3},
+ {'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': '{"json": "options"}'}]
expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
{'dev': 'vdb', 'type': 'disk',
'bus': 'virtio', 'format': 'ext4'},
@@ -1133,7 +1184,11 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'bus': 'scsi', 'boot_index': '1'},
{'dev': 'vdo', 'type': 'disk',
'bus': 'scsi', 'boot_index': '2'},
- {'dev': 'sda', 'type': 'disk', 'bus': 'sata'}]
+ {'dev': 'sda', 'type': 'disk', 'bus': 'sata'},
+ {'dev': 'vda', 'type': 'disk', 'bus': 'virtio',
+ 'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': {'json': 'options'}}]
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
for bdm, expected in zip(bdms, expected):
@@ -1441,6 +1496,15 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'destination_type': 'volume',
'boot_index': -1}))]
+ self.image = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 6, 'instance_uuid': uuids.instance,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'boot_index': 0}))]
+
def tearDown(self):
super(DefaultDeviceNamesTestCase, self).tearDown()
for patcher in self.patchers:
@@ -1450,7 +1514,7 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'nova.virt.libvirt.utils.get_arch',
return_value=obj_fields.Architecture.X86_64)
def _test_default_device_names(self, eph, swap, bdm, mock_get_arch):
- bdms = eph + swap + bdm
+ bdms = self.image + eph + swap + bdm
bdi = driver.get_block_device_info(self.instance, bdms)
blockinfo.default_device_names(self.virt_type,
self.context,
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 1967939e56..4a8aa027a9 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -16,6 +16,7 @@ from lxml import etree
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
+from nova import exception
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
@@ -70,6 +71,23 @@ class LibvirtConfigTest(LibvirtConfigBaseTest):
obj = config.LibvirtConfigObject(root_name="demo")
obj.parse_str(inxml)
+ def test_parse_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertTrue(obj.parse_on_off_str('on'))
+ self.assertFalse(obj.parse_on_off_str('off'))
+ self.assertFalse(obj.parse_on_off_str(None))
+ self.assertRaises(exception.InvalidInput, obj.parse_on_off_str, 'foo')
+
+ def test_get_yes_no_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('yes', obj.get_yes_no_str(True))
+ self.assertEqual('no', obj.get_yes_no_str(False))
+
+ def test_get_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('on', obj.get_on_off_str(True))
+ self.assertEqual('off', obj.get_on_off_str(False))
+
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
@@ -2321,6 +2339,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
obj.vapic = True
obj.spinlocks = True
obj.vendorid_spoof = True
+ obj.vpindex = True
+ obj.runtime = True
+ obj.synic = True
+ obj.reset = True
+ obj.frequencies = True
+ obj.reenlightenment = True
+ obj.tlbflush = True
+ obj.ipi = True
+ obj.evmcs = True
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -2329,6 +2356,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
<vapic state="on"/>
<spinlocks state="on" retries="4095"/>
<vendor_id state="on" value="1234567890ab"/>
+ <vpindex state='on'/>
+ <runtime state='on'/>
+ <synic state='on'/>
+ <reset state='on'/>
+ <frequencies state='on'/>
+ <reenlightenment state='on'/>
+ <tlbflush state='on'/>
+ <ipi state='on'/>
+ <evmcs state='on'/>
</hyperv>""")
def test_feature_pmu(self):
@@ -2347,6 +2383,13 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
xml = obj.to_xml()
self.assertXmlEqual(xml, "<pmu state='off'/>")
+ def test_feature_ioapic(self):
+ obj = config.LibvirtConfigGuestFeatureIOAPIC()
+ obj.driver = "libvirt"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<ioapic driver='libvirt'/>")
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
@@ -3138,6 +3181,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
config.LibvirtConfigNodeDeviceMdevInformation)
self.assertEqual("nvidia-11", obj.mdev_information.type)
self.assertEqual(12, obj.mdev_information.iommu_group)
+ self.assertIsNone(obj.mdev_information.uuid)
+
+ def test_config_mdev_device_uuid(self):
+ xmlin = """
+ <device>
+ <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name>
+ <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path>
+ <parent>pci_0000_41_00_0</parent>
+ <driver>
+ <name>vfio_mdev</name>
+ </driver>
+ <capability type='mdev'>
+ <type id='nvidia-442'/>
+ <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid>
+ <iommuGroup number='57'/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+ self.assertIsInstance(obj.mdev_information,
+ config.LibvirtConfigNodeDeviceMdevInformation)
+ self.assertEqual("nvidia-442", obj.mdev_information.type)
+ self.assertEqual(57, obj.mdev_information.iommu_group)
+ self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a",
+ obj.mdev_information.uuid)
def test_config_vdpa_device(self):
xmlin = """
@@ -3975,6 +4044,28 @@ class LibvirtConfigGuestVPMEMTest(LibvirtConfigBaseTest):
</memory>""")
+class LibvirtConfigGuestIOMMUTest(LibvirtConfigBaseTest):
+
+ def test_config_iommu(self):
+ obj = config.LibvirtConfigGuestIOMMU()
+ obj.model = "intel"
+ obj.interrupt_remapping = True
+ obj.caching_mode = True
+ obj.aw_bits = 48
+ obj.eim = True
+ obj.iotlb = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(
+ xml,
+ """
+<iommu model='intel'>
+ <driver intremap='on' caching_mode='on' aw_bits='48' eim='on' iotlb='on'/>
+</iommu>
+ """,
+ )
+
+
class LibvirtConfigDomainCapsVideoModelsTests(LibvirtConfigBaseTest):
def test_parse_video_model(self):
@@ -4091,7 +4182,8 @@ class LibvirtConfigDomainCapsDevicesTests(LibvirtConfigBaseTest):
obj.parse_str(xml)
# we only use the video and disk devices today.
device_types = [config.LibvirtConfigDomainCapsDiskBuses,
- config.LibvirtConfigDomainCapsVideoModels]
+ config.LibvirtConfigDomainCapsVideoModels,
+ ]
# so we assert there are only two device types parsed
self.assertEqual(2, len(obj.devices))
# we then assert that the parsed devices are of the correct type
diff --git a/nova/tests/unit/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py
index a6ad7f9ccc..cb435286e9 100644
--- a/nova/tests/unit/virt/libvirt/test_designer.py
+++ b/nova/tests/unit/virt/libvirt/test_designer.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.pci import utils as pci_utils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 7bc5b34e5e..1c5f79dc89 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -31,6 +31,7 @@ import testtools
import threading
import time
import unittest
+from unittest import mock
from castellan import key_manager
import ddt
@@ -38,7 +39,6 @@ import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
-import mock
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
@@ -76,7 +76,6 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
@@ -740,16 +739,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'resolve_driver_format',
imagebackend.Image._get_driver_format)
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
# architecture in some tests
- _p = mock.patch('os.uname')
- self.mock_uname = _p.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.X86_64)
- self.addCleanup(_p.stop)
self.test_instance = _create_test_instance()
network_info = objects.InstanceInfoCache(
@@ -963,9 +960,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits')
+ @mock.patch.object(host.Host, "has_min_version")
def test_static_traits(
- self, mock_vif_traits, mock_video_traits, mock_storage_traits,
- mock_cpu_traits,
+ self, mock_version, mock_vif_traits, mock_video_traits,
+ mock_storage_traits, mock_cpu_traits,
):
"""Ensure driver capabilities are correctly retrieved and cached."""
@@ -976,14 +974,21 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_video_traits.return_value = {'COMPUTE_GRAPHICS_MODEL_VGA': True}
mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
+ # for support COMPUTE_VIOMMU_MODEL_VIRTIO
+ mock_version.return_value = True
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
expected = {
- 'HW_CPU_HYPERTHREADING': True,
- 'COMPUTE_STORAGE_BUS_VIRTIO': True,
'COMPUTE_GRAPHICS_MODEL_VGA': True,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_STORAGE_BUS_VIRTIO': True,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': True,
+ 'HW_CPU_HYPERTHREADING': True
}
static_traits = drvr.static_traits
@@ -1029,6 +1034,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': False
}
static_traits = drvr.static_traits
@@ -2260,6 +2269,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
+ pci_utils.get_mac_by_pci_address.side_effect = None
+ pci_utils.get_mac_by_pci_address.return_value = 'da:d1:f2:91:95:c1'
with test.nested(
mock.patch('nova.objects.VirtualInterfaceList'
'.get_by_instance_uuid', return_value=vifs),
@@ -2269,8 +2280,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=guest),
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
return_value=xml),
- mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- return_value='da:d1:f2:91:95:c1')):
+ ):
metadata_obj = drvr._build_device_metadata(self.context,
instance_ref)
metadata = metadata_obj.devices
@@ -2567,6 +2577,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(time, "time")
def test_get_guest_config(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -2575,178 +2590,249 @@ class LibvirtConnTestCase(test.NoDBTestCase,
test_instance["display_name"] = "purple tomatoes"
test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
test_instance['system_metadata']['owner_user_name'] = 'cupcake'
-
- ctxt = context.RequestContext(project_id=123,
- project_name="aubergine",
- user_id=456,
- user_name="pie")
-
- flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
- vcpus=28,
- root_gb=496,
- ephemeral_gb=8128,
- swap=33550336,
- extra_specs={})
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
instance_ref = objects.Instance(**test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
-
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info,
- context=ctxt)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
self.assertEqual(cfg.uuid, instance_ref["uuid"])
- self.assertEqual(3, len(cfg.features))
- self.assertIsInstance(cfg.features[0],
- vconfig.LibvirtConfigGuestFeatureACPI)
- self.assertIsInstance(cfg.features[1],
- vconfig.LibvirtConfigGuestFeatureAPIC)
- self.assertIsInstance(
- cfg.features[2], vconfig.LibvirtConfigGuestFeatureVMCoreInfo)
self.assertEqual(cfg.memory, 6 * units.Ki)
self.assertEqual(cfg.vcpus, 28)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
self.assertEqual(len(cfg.devices), 11)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigGuestUSBHostController)
- self.assertIsInstance(cfg.devices[10],
- vconfig.LibvirtConfigMemoryBalloon)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
+
self.assertEqual(len(cfg.metadata), 1)
- self.assertIsInstance(cfg.metadata[0],
- vconfig.LibvirtConfigGuestMetaNovaInstance)
- self.assertEqual(version.version_string_with_package(),
- cfg.metadata[0].package)
- self.assertEqual("purple tomatoes",
- cfg.metadata[0].name)
- self.assertEqual(1234567.89,
- cfg.metadata[0].creationTime)
- self.assertEqual("image",
- cfg.metadata[0].roottype)
- self.assertEqual(str(instance_ref["image_ref"]),
- cfg.metadata[0].rootid)
-
- self.assertIsInstance(cfg.metadata[0].owner,
- vconfig.LibvirtConfigGuestMetaNovaOwner)
- self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
- cfg.metadata[0].owner.userid)
- self.assertEqual("cupcake",
- cfg.metadata[0].owner.username)
- self.assertEqual("fake",
- cfg.metadata[0].owner.projectid)
- self.assertEqual("sweetshop",
- cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(
+ version.version_string_with_package(), cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes", cfg.metadata[0].name)
+ self.assertEqual(1234567.89, cfg.metadata[0].creationTime)
+ self.assertEqual("image", cfg.metadata[0].roottype)
+ self.assertEqual(
+ str(instance_ref["image_ref"]), cfg.metadata[0].rootid)
- self.assertIsInstance(cfg.metadata[0].flavor,
- vconfig.LibvirtConfigGuestMetaNovaFlavor)
- self.assertEqual("m1.small",
- cfg.metadata[0].flavor.name)
- self.assertEqual(6,
- cfg.metadata[0].flavor.memory)
- self.assertEqual(28,
- cfg.metadata[0].flavor.vcpus)
- self.assertEqual(496,
- cfg.metadata[0].flavor.disk)
- self.assertEqual(8128,
- cfg.metadata[0].flavor.ephemeral)
- self.assertEqual(33550336,
- cfg.metadata[0].flavor.swap)
+ self.assertIsInstance(
+ cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(
+ "838a72b0-0d54-4827-8fd6-fb1227633ceb",
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("cupcake", cfg.metadata[0].owner.username)
+ self.assertEqual("fake", cfg.metadata[0].owner.projectid)
+ self.assertEqual("sweetshop", cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small", cfg.metadata[0].flavor.name)
+ self.assertEqual(6, cfg.metadata[0].flavor.memory)
+ self.assertEqual(28, cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496, cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336, cfg.metadata[0].flavor.swap)
- @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- def test_get_guest_config_q35(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ num_ports = 0
+ for device in cfg.devices:
+ try:
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
+ num_ports += 1
+ except AttributeError:
+ pass
- TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ # i440fx is not pcie machine so there should be no pcie ports
+ self.assertEqual(0, num_ports)
+
+ @mock.patch.object(time, "time")
+ def test_get_guest_config_no_pcie_ports(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
+ time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- image_meta = objects.ImageMeta.from_dict({
- "disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-q35-test"}})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+ test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
+ test_instance['system_metadata']['owner_user_name'] = 'cupcake'
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.flavor = flavor
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
+ # i440fx is not pcie machine so there should be no pcie ports
+ self.assertEqual(0, num_ports)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
- def test_get_guest_config_pcie_i440fx(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ def test_get_guest_config_q35(self):
+ """Generate a "q35" guest with minimal configuration.
+
+ This configures an explicit machine type (q35) but defaults to x86
+ since this is our default architecture (in our test env, anyway).
+ """
+ self.flags(virt_type="kvm", group='libvirt')
TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ CONF.set_override(
+ "num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
+ group='libvirt',
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-i440fx-test"}})
+ "properties": {"hw_machine_type": "q35"},
+ })
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta,
+ disk_info,
+ )
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
+ self.assertEqual(len(cfg.devices), 19)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestPCIeRootController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- # i440fx is not pcie machine so there should be no pcie ports
- self.assertEqual(0, num_ports)
+ self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_default_machine_type',
@@ -3132,6 +3218,41 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
+ def test_get_guest_memory_backing_config_locked_flavor(self):
+ extra_specs = {
+ "hw:locked_memory": "True",
+ "hw:mem_page_size": 1000,
+ }
+ flavor = objects.Flavor(
+ name='m1.small', memory_mb=6, vcpus=28, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
+ def test_get_guest_memory_backing_config_locked_image_meta(self):
+ extra_specs = {}
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {
+ "hw_locked_memory": "True",
+ "hw_mem_page_size": 1000,
+ }})
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
def test_get_guest_memory_backing_config_realtime_invalid_share(self):
"""Test behavior when there is no pool of shared CPUS on which to place
the emulator threads, isolating them from the instance CPU processes.
@@ -3425,10 +3546,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(objects=[pci_device])
+ pci_req = objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name='pci-alias-1')
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[pci_req])
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
@@ -3436,9 +3562,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
- return_value=set([3])),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device])):
+ return_value=set([3]))
+ ):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
@@ -3477,23 +3602,31 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(
+ objects=[pci_device, pci_device2]
+ )
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias-1"
+ )
+ ]
+ )
with test.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([3])),
mock.patch.object(random, 'choice'),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device, pci_device2]),
mock.patch.object(conn, '_has_numa_support',
return_value=False)
- ) as (_, _, choice_mock, pci_mock, _):
+ ) as (_, _, choice_mock, _):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
@@ -7004,14 +7137,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[5].rate_bytes, 1024)
self.assertEqual(cfg.devices[5].rate_period, 2)
- @mock.patch('nova.virt.libvirt.driver.os.path.exists')
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_with_rng_backend(self, mock_path):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_with_rng_backend(self):
self.flags(virt_type='kvm',
rng_dev_path='/dev/hw_rng',
group='libvirt')
self.flags(pointer_model='ps2mouse')
- mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -7480,12 +7611,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
address='0000:00:00.1',
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
+ instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias"
+ )
+ ]
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@@ -7601,11 +7739,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_aarch64(
- self, mock_path_exists, mock_numa, mock_storage,
- ):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_aarch64(self, mock_numa, mock_storage):
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
@@ -7625,7 +7760,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, disk_info)
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
num_ports = 0
@@ -7642,10 +7776,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
def test_get_guest_config_aarch64_with_graphics(
- self, mock_path_exists, mock_numa, mock_storage,
+ self, mock_numa, mock_storage,
):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
@@ -7655,7 +7788,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = self._get_guest_config_with_graphics()
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
usbhost_exists = False
@@ -8410,6 +8542,206 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
+ def test_get_guest_iommu_not_enabled(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ test_instance = _create_test_instance()
+ instance_ref = objects.Instance(**test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ for device in cfg.devices:
+ self.assertNotEqual('iommu', device.root_name)
+
+ def test_get_guest_iommu_config_model(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'intel',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ def test_get_guest_iommu_config_model_auto(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(48, device.aw_bits)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_intel(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_aarch64(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_viommu_model": 'auto',
+ "hw_architecture": fields.Architecture.AARCH64,
+ "hw_machine_type": "virt"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('smmuv3', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertFalse(device.eim)
+ self.assertTrue(device.iotlb)
+ self.assertEqual(1, count)
+
+ def test_get_guest_iommu_config_not_support_machine_type(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUMachineType, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
+ def test_get_guest_iommu_config_not_support_architecture(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_architecture": fields.Architecture.PPC64LE,
+ "hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUArchitecture, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -8912,6 +9244,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta))
mock_fsthaw.assert_called_once_with()
+ def test_set_quiesced_agent_connection_fails(self):
+ # This is require to mock guest host
+ self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
+
+ with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
+ error = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "QEMU guest agent is not connected",
+ error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
+
+ mock_fsfreeze.side_effect = error
+ mock_fsfreeze.error_code = error.get_error_code()
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes", }})
+ self.assertRaises(exception.InstanceQuiesceFailed,
+ drvr._set_quiesced, self.context, instance, image_meta, True)
+
def test_create_snapshot_metadata(self):
base = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
@@ -11486,13 +11838,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_assert_dest_node_has_enough_disk')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migration_source_disk_over_commit_none(self,
- mock_check, mock_shared_block, mock_enough, mock_disk_check):
+ mock_check, mock_shared_block, mock_disk_check):
mock_check.return_value = False
mock_shared_block.return_value = False
@@ -13707,7 +14057,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_fetch.assert_called_once_with(self.context, instance,
fallback_from_host=None)
mock_create.assert_called_once_with(
- disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
+ '/fake/instance/dir/foo',
+ disk_info['type'],
+ disk_info['virt_disk_size'],
+ )
mock_exists.assert_called_once_with('/fake/instance/dir/foo')
def test_create_images_and_backing_qcow2(self):
@@ -13739,7 +14092,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context, instance,
"/fake/instance/dir", disk_info)
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_images_not_exist_fallback(
self, mock_utime, mock_create_cow_image):
@@ -13819,7 +14172,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_utime.assert_called()
mock_create_cow_image.assert_called_once_with(
- backfile_path, '/fake/instance/dir/disk_path', virt_disk_size)
+ '/fake/instance/dir/disk_path',
+ 'qcow2',
+ virt_disk_size,
+ backing_file=backfile_path,
+ )
@mock.patch('nova.virt.libvirt.imagebackend.Image.exists',
new=mock.Mock(return_value=True))
@@ -13912,7 +14269,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(mock_fetch_image.called)
@mock.patch('nova.privsep.path.utime')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_images_and_backing_ephemeral_gets_created(
self, mock_create_cow_image, mock_utime):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -13965,14 +14322,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# TODO(efried): Should these be disk_info[path]??
mock_create_cow_image.assert_has_calls([
mock.call(
- root_backing,
CONF.instances_path + '/disk',
- disk_info_byname['disk']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk']['virt_disk_size'],
+ backing_file=root_backing,
),
mock.call(
- ephemeral_backing,
CONF.instances_path + '/disk.local',
- disk_info_byname['disk.local']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk.local']['virt_disk_size'],
+ backing_file=ephemeral_backing,
),
])
@@ -15626,8 +15985,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY,
specified_fs=None)
- @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
- def test_create_image_resize_snap_backend(self, mock_cache):
+ def test_create_image_resize_snap_backend(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.task_state = task_states.RESIZE_FINISH
@@ -15655,7 +16013,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.privsep.path.utime')
@mock.patch('nova.virt.libvirt.utils.fetch_image')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_ephemeral_specified_fs_not_valid(
self, mock_create_cow_image, mock_fetch_image, mock_utime):
CONF.set_override('default_ephemeral_format', 'ext4')
@@ -15671,10 +16029,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- image_meta)
- disk_info['mapping'].pop('disk.local')
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta,
+ block_device_info=block_device_info)
with test.nested(
mock.patch('oslo_concurrency.processutils.execute'),
@@ -16086,9 +16443,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warning')
- @mock.patch('nova.compute.utils.get_machine_ips')
- def test_check_my_ip(self, mock_ips, mock_log):
- mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
+ def test_check_my_ip(self, mock_log):
+
+ self.libvirt.mock_get_machine_ips.return_value = [
+ '8.8.8.8', '75.75.75.75']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._check_my_ip()
mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
@@ -16110,6 +16468,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -16117,8 +16476,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
@@ -16133,6 +16490,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -16140,8 +16498,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16161,11 +16517,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = True
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
return_value=mock.MagicMock()),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16324,7 +16679,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.return_value = fake_guest
self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
- self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])
drvr.reboot(None, instance, [], 'SOFT')
@@ -16336,14 +16690,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
self.assertEqual(2, mock_get.call_count)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
- mock_sleep, mock_loopingcall,
- mock_get_instance_pci_devs):
+ mock_sleep, mock_loopingcall):
class FakeLoopingCall(object):
def start(self, *a, **k):
return self
@@ -16371,7 +16723,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_domain.return_value = mock_domain
mock_hard_reboot.side_effect = fake_hard_reboot
mock_loopingcall.return_value = FakeLoopingCall()
- mock_get_instance_pci_devs.return_value = []
drvr.reboot(None, instance, [], 'SOFT')
self.assertTrue(self.reboot_hard_reboot_called)
@@ -16569,7 +16920,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
- @mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
@@ -16586,7 +16936,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_guest_config, mock_get_instance_path,
mock_get_instance_disk_info, mock_create_images_and_backing,
mock_create_domand_and_network,
- mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
+ mock_looping_call, mock_ensure_tree):
"""For a hard reboot, we shouldn't need an additional call to glance
to get the image metadata.
@@ -16632,10 +16982,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(conn, '_detach_mediated_devices')
@mock.patch.object(conn, '_detach_direct_passthrough_ports')
@mock.patch.object(conn, '_detach_pci_devices')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='pci devs')
@mock.patch.object(conn._host, 'get_guest', return_value=guest)
- def suspend(mock_get_guest, mock_get_instance_pci_devs,
+ def suspend(mock_get_guest,
mock_detach_pci_devices,
mock_detach_direct_passthrough_ports,
mock_detach_mediated_devices,
@@ -16778,15 +17126,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
- @mock.patch.object(host.Host,
- 'has_min_version', return_value=True)
- def _test_detach_direct_passthrough_ports(self,
- mock_has_min_version, vif_type):
+ @mock.patch.object(
+ host.Host, 'has_min_version', new=mock.Mock(return_value=True)
+ )
+ def _test_detach_direct_passthrough_ports(
+ self, vif_type, detach_device=True,
+ vnic_type=network_model.VNIC_TYPE_DIRECT):
instance = objects.Instance(**self.test_instance)
expeted_pci_slot = "0000:00:00.0"
network_info = _fake_network_info(self)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ network_info[0]['vnic_type'] = vnic_type
# some more adjustments for the fake network_info so that
# the correct get_config function will be executed (vif's
# get_config_hw_veb - which is according to the real SRIOV vif)
@@ -16799,32 +17149,55 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_instance_pci_devs will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
expected_pci_device_obj = (
- objects.PciDevice(address=expeted_pci_slot, request_id=None))
+ objects.PciDevice(
+ address=expeted_pci_slot, request_id=None, compute_node_id=42
+ )
+ )
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [expected_pci_device_obj]
- domain = FakeVirtDomain()
+ domain = FakeVirtDomain(id=24601, name='Jean Valjean')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
- with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
+ with mock.patch.object(
+ drvr, '_detach_pci_devices'
+ ) as mock_detach_pci, mock.patch.object(
+ drvr, 'detach_interface'
+ ) as mock_detach_interface:
drvr._detach_direct_passthrough_ports(
self.context, instance, guest)
- mock_detach_pci.assert_called_once_with(
- guest, [expected_pci_device_obj])
+ if detach_device:
+ mock_detach_pci.assert_called_once_with(
+ guest, [expected_pci_device_obj])
+ else:
+ mock_detach_interface.assert_called_once()
- def test_detach_direct_passthrough_ports_interface_interface_hostdev(self):
+ def test_detach_direct_passthrough_ports_ovs_hw_offload(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestInterface
- self._test_detach_direct_passthrough_ports(vif_type="hw_veb")
+ self._test_detach_direct_passthrough_ports("ovs", detach_device=False)
- def test_detach_direct_passthrough_ports_interface_pci_hostdev(self):
+ def test_detach_direct_passthrough_ports_sriov_nic_agent(self):
+ # Note: test detach_direct_passthrough_ports method for vif with config
+ # LibvirtConfigGuestInterface
+ self._test_detach_direct_passthrough_ports(
+ "hw_veb", detach_device=False
+ )
+
+ def test_detach_direct_physical_passthrough_ports_sriov_nic_agent(self):
+ self._test_detach_direct_passthrough_ports(
+ "hostdev_physical",
+ vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL
+ )
+
+ def test_detach_direct_passthrough_ports_infiniband(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestHostdevPCI
- self._test_detach_direct_passthrough_ports(vif_type="ib_hostdev")
+ self._test_detach_direct_passthrough_ports("ib_hostdev")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@@ -16834,9 +17207,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
network_info = _fake_network_info(self, 2)
+ direct_physical = network_model.VNIC_TYPE_DIRECT_PHYSICAL
for network_info_inst in network_info:
- network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- network_info_inst['type'] = "hw_veb"
+ network_info_inst['vnic_type'] = direct_physical
+ network_info_inst['type'] = "hostdev_physical"
network_info_inst['details'] = dict(vlan="2145")
network_info_inst['address'] = "fa:16:3e:96:2a:48"
@@ -16846,7 +17220,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_instance_pci_devs will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [
@@ -16901,8 +17275,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr, '_create_guest_with_network',
return_value=guest),
mock.patch.object(drvr, '_attach_pci_devices'),
- mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='fake_pci_devs'),
+ mock.patch('nova.objects.Instance.get_pci_devices',
+ return_value='fake_pci_devs'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(guest, 'sync_guest_time'),
mock.patch.object(drvr, '_wait_for_running',
@@ -17653,12 +18027,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
got = drvr._get_cpu_info()
self.assertEqual(want, got)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
@mock.patch.object(host.Host, 'list_pci_devices',
return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7'])
- def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname):
+ def test_get_pci_passthrough_devices(self, mock_list):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -17731,7 +18104,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# The first call for every VF is to determine parent_ifname and
# the second call to determine the MAC address.
- mock_get_ifname.assert_has_calls([
+ pci_utils.get_ifname_by_pci_address.assert_has_calls([
mock.call('0000:04:10.7', pf_interface=True),
mock.call('0000:04:11.7', pf_interface=True),
])
@@ -20144,7 +20517,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr._conn, 'defineXML', create=True),
mock.patch('nova.virt.libvirt.utils.get_disk_size'),
mock.patch('nova.virt.libvirt.utils.get_disk_backing_file'),
- mock.patch('nova.virt.libvirt.utils.create_cow_image'),
+ mock.patch('nova.virt.libvirt.utils.create_image'),
mock.patch('nova.virt.libvirt.utils.extract_snapshot'),
mock.patch.object(drvr, '_set_quiesced'),
mock.patch.object(drvr, '_can_quiesce')
@@ -20187,7 +20560,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_size.assert_called_once_with(srcfile, format="qcow2")
mock_backing.assert_called_once_with(srcfile, basename=False,
format="qcow2")
- mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_create_cow.assert_called_once_with(
+ dltfile, 'qcow2', 1004009, backing_file=bckfile)
mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
@@ -20421,7 +20795,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 0,
'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/sda'}))
+ 'device_name': '/dev/sda', 'boot_index': 0}))
info = {'block_device_mapping': driver_block_device.convert_volumes(
[bdm])}
info['block_device_mapping'][0]['connection_info'] = conn_info
@@ -20531,8 +20905,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_disk_config_image_type())
self.assertEqual(2, drvr.image_backend.by_name.call_count)
- call1 = mock.call(instance, 'disk.config', 'rbd')
- call2 = mock.call(instance, 'disk.config', 'flat')
+ call1 = mock.call(instance, 'disk.config', 'rbd',
+ disk_info_mapping=disk_mapping['disk.config'])
+ call2 = mock.call(instance, 'disk.config', 'flat',
+ disk_info_mapping=disk_mapping['disk.config'])
drvr.image_backend.by_name.assert_has_calls([call1, call2])
self.assertEqual(mock.sentinel.diskconfig, diskconfig)
@@ -20575,7 +20951,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = mock.Mock()
with test.nested(
- mock.patch.object(pci_manager, 'get_instance_pci_devs'),
mock.patch.object(drvr, '_attach_pci_devices'),
mock.patch.object(drvr, '_attach_direct_passthrough_ports'),
):
@@ -21011,7 +21386,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'reserved': 0,
},
orc.PCPU: {
@@ -21027,7 +21402,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'reserved': 512,
},
orc.DISK_GB: {
@@ -22141,11 +22516,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_instance_disk_info')
@mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
- def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
- mock_get_disk_info):
+ def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get):
mappings = [
{
'device_name': '/dev/sdb4',
@@ -22192,7 +22564,6 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# Old flavor, eph is 20, real disk is 3, target is 2, fail
flavor = {'root_gb': 10, 'ephemeral_gb': 2}
flavor_obj = objects.Flavor(**flavor)
- mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(
exception.InstanceFaultRollback,
@@ -23153,6 +23524,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
instance = self._create_instance(params=inst_params)
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': instance.image_ref}
instance_dir = libvirt_utils.get_instance_path(instance)
disk_path = os.path.join(instance_dir, 'disk')
@@ -23172,7 +23546,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
]
drvr._create_and_inject_local_root(
- self.context, instance, False, '', disk_images, None, None)
+ self.context, instance, disk_info['mapping'], False, '',
+ disk_images, None, None)
mock_fetch_calls = [
mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
@@ -23255,9 +23630,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# config_drive is True by default, configdrive.required_by()
# returns True
instance_ref = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
disk_images = {'image_id': None}
- drvr._create_and_inject_local_root(self.context, instance_ref, False,
+ drvr._create_and_inject_local_root(self.context, instance_ref,
+ disk_info['mapping'], False,
'', disk_images, get_injection_info(),
None)
self.assertFalse(mock_inject.called)
@@ -23277,6 +23656,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_image.get.return_value = {'locations': [], 'disk_format': 'raw'}
instance = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': 'foo'}
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -23287,6 +23669,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_fetch.reset_mock()
drvr._create_and_inject_local_root(self.context,
instance,
+ disk_info['mapping'],
False,
'',
disk_images,
@@ -24646,7 +25029,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue and
# disk, in that order
@@ -24718,7 +25101,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue, disk, and
# disk.config.rescue in that order
@@ -24956,7 +25339,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
- 'device_name': '/dev/vda'}))
+ 'device_name': '/dev/vda',
+ 'boot_index': 0}))
bdms = driver_block_device.convert_volumes([bdm])
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
@@ -25642,9 +26026,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
self._test_get_gpu_inventories(drvr, expected, ['nvidia-11'])
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_mdev_capable_devices')
- def test_get_gpu_inventories_with_two_types(self, get_mdev_capable_devs):
+ def test_get_gpu_inventories_with_two_types(self):
self.flags(enabled_mdev_types=['nvidia-11', 'nvidia-12'],
group='devices')
# we need to call the below again to ensure the updated
@@ -27195,6 +27577,35 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_guest.return_value.assert_not_called()
self.assertIsNone(mock_find.call_args.args[3])
+ def test_set_features_windows(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ guest = vconfig.LibvirtConfigGuest()
+ self.drvr._set_features(
+ guest, 'windows',
+ objects.ImageMeta(
+ properties=objects.ImageMetaProps()
+ ),
+ objects.Flavor(extra_specs={})
+ )
+ features = guest.features
+ hv = None
+ for feature in features:
+ if feature.root_name == 'hyperv':
+ hv = feature
+ self.assertTrue(hv.relaxed)
+ self.assertTrue(hv.vapic)
+ self.assertTrue(hv.spinlocks)
+ self.assertEqual(8191, hv.spinlock_retries)
+ self.assertTrue(hv.vpindex)
+ self.assertTrue(hv.runtime)
+ self.assertTrue(hv.synic)
+ self.assertTrue(hv.reset)
+ self.assertTrue(hv.frequencies)
+ self.assertTrue(hv.reenlightenment)
+ self.assertTrue(hv.tlbflush)
+ self.assertTrue(hv.ipi)
+ self.assertTrue(hv.evmcs)
+
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
@@ -28265,7 +28676,7 @@ class _BaseSnapshotTests(test.NoDBTestCase):
@mock.patch.object(host.Host, '_get_domain')
@mock.patch('nova.virt.libvirt.utils.get_disk_size',
new=mock.Mock(return_value=0))
- @mock.patch('nova.virt.libvirt.utils.create_cow_image',
+ @mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
new=mock.Mock(return_value=None))
@@ -28591,13 +29002,11 @@ class LVMSnapshotTests(_BaseSnapshotTests):
new=mock.Mock(return_value=None))
@mock.patch('nova.virt.libvirt.utils.get_disk_type_from_path',
new=mock.Mock(return_value='lvm'))
- @mock.patch('nova.virt.libvirt.utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image')
@mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
def _test_lvm_snapshot(self, disk_format, mock_volume_info,
- mock_convert_image, mock_file_open):
+ mock_convert_image):
self.flags(images_type='lvm',
images_volume_group='nova-vg', group='libvirt')
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index 70d438d816..5b181b8f06 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_service import fixture as service_fixture
from oslo_utils import encodeutils
@@ -403,9 +404,21 @@ class GuestTestCase(test.NoDBTestCase):
self.assertIsNotNone(
self.guest.get_interface_by_cfg(
cfg, from_persistent_config=True))
+ cfg = vconfig.LibvirtConfigGuestInterface()
+ # NOTE(sean-k-mooney): a default constructed object is not valid
+ # to pass to get_interface_by_cfg and so we just modify the xml to
+ # make it not match
+ cfg.parse_str("""
+ <interface type="wont_match">
+ <mac address="fa:16:3e:f9:af:ae"/>
+ <model type="virtio"/>
+ <driver name="qemu"/>
+ <source bridge="qbr84008d03-11"/>
+ <target dev="tap84008d03-11"/>
+ </interface>""")
self.assertIsNone(
self.guest.get_interface_by_cfg(
- vconfig.LibvirtConfigGuestInterface(),
+ cfg,
from_persistent_config=True))
self.domain.XMLDesc.assert_has_calls(
[
@@ -1040,3 +1053,25 @@ class JobInfoTestCase(test.NoDBTestCase):
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
+
+ @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
+ @mock.patch.object(fakelibvirt.virDomain, "jobStats")
+ def test_job_stats_no_ram(self, mock_stats, mock_info):
+ mock_stats.side_effect = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error: migration was active, but no RAM info was set",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ error_message="migration was active, but no RAM info was set")
+
+ info = self.guest.get_job_info()
+
+ self.assertIsInstance(info, libvirt_guest.JobInfo)
+ self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_NONE, info.type)
+ self.assertEqual(0, info.time_elapsed)
+ self.assertEqual(0, info.time_remaining)
+ self.assertEqual(0, info.memory_total)
+ self.assertEqual(0, info.memory_processed)
+ self.assertEqual(0, info.memory_remaining)
+
+ mock_stats.assert_called_once_with()
+ self.assertFalse(mock_info.called)
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 7082c3ad95..3afd6c139d 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -15,12 +15,12 @@
# under the License.
import os
+from unittest import mock
import ddt
import eventlet
from eventlet import greenthread
from eventlet import tpool
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -72,11 +72,10 @@ class HostTestCase(test.NoDBTestCase):
self.useFixture(nova_fixtures.LibvirtFixture())
self.host = host.Host("qemu:///system")
- @mock.patch("nova.virt.libvirt.host.Host._init_events")
- def test_repeat_initialization(self, mock_init_events):
+ def test_repeat_initialization(self):
for i in range(3):
self.host.initialize()
- mock_init_events.assert_called_once_with()
+ self.host._init_events.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback(self, mock_close):
@@ -1156,12 +1155,9 @@ Active: 8381604 kB
expect_vf = ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan", "txvlan"]
self.assertEqual(expect_vf, actualvf)
- @mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- new=mock.MagicMock(
- side_effect=exception.PciDeviceNotFoundById(
- '0000:04:00.3')))
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- def test_get_pcidev_info_non_nic(self, mock_get_ifname):
+ def test_get_pcidev_info_non_nic(self):
+ pci_utils.get_mac_by_pci_address.side_effect = (
+ exception.PciDeviceNotFoundById('0000:04:00.3'))
dev_name = "pci_0000_04_11_7"
pci_dev = fakelibvirt.NodeDevice(
self.host._get_connection(),
@@ -1175,11 +1171,10 @@ Active: 8381604 kB
'parent_addr': '0000:04:00.3',
}
self.assertEqual(expect_vf, actual_vf)
- mock_get_ifname.assert_not_called()
+ pci_utils.get_ifname_by_pci_address.assert_not_called()
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
- def test_get_pcidev_info(self, mock_get_ifname):
+ def test_get_pcidev_info(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
devs = {
"pci_0000_04_00_3", "pci_0000_04_10_7", "pci_0000_04_11_7",
"pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1",
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index decb27f982..0dc1009c92 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -19,11 +19,11 @@ import inspect
import os
import shutil
import tempfile
+from unittest import mock
from castellan import key_manager
import ddt
import fixtures
-import mock
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_service import loopingcall
@@ -163,7 +163,13 @@ class _ImageTestCase(object):
self.assertEqual(fs.source_file, image.path)
def test_libvirt_info(self):
- image = self.image_class(self.INSTANCE, self.NAME)
+ disk_info = {
+ 'bus': 'virtio',
+ 'dev': '/dev/vda',
+ 'type': 'cdrom',
+ }
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
extra_specs = {
'quota:disk_read_bytes_sec': 10 * units.Mi,
'quota:disk_read_iops_sec': 1 * units.Ki,
@@ -172,15 +178,9 @@ class _ImageTestCase(object):
'quota:disk_total_bytes_sec': 30 * units.Mi,
'quota:disk_total_iops_sec': 3 * units.Ki,
}
- disk_info = {
- 'bus': 'virtio',
- 'dev': '/dev/vda',
- 'type': 'cdrom',
- }
disk = image.libvirt_info(
- disk_info, cache_mode="none", extra_specs=extra_specs,
- boot_order="1")
+ cache_mode="none", extra_specs=extra_specs, boot_order="1")
self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
self.assertEqual("/dev/vda", disk.target_dev)
@@ -205,16 +205,18 @@ class _ImageTestCase(object):
get_disk_size.assert_called_once_with(image.path)
def _test_libvirt_info_scsi_with_unit(self, disk_unit):
- # The address should be set if bus is scsi and unit is set.
- # Otherwise, it should not be set at all.
- image = self.image_class(self.INSTANCE, self.NAME)
disk_info = {
'bus': 'scsi',
'dev': '/dev/sda',
'type': 'disk',
}
+ # The address should be set if bus is scsi and unit is set.
+ # Otherwise, it should not be set at all.
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
+
disk = image.libvirt_info(
- disk_info, cache_mode='none', extra_specs={}, disk_unit=disk_unit)
+ cache_mode='none', extra_specs={}, disk_unit=disk_unit)
if disk_unit:
self.assertEqual(0, disk.device_addr.controller)
self.assertEqual(disk_unit, disk.device_addr.unit)
@@ -523,7 +525,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
@@ -544,14 +546,14 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(
- self.TEMPLATE_PATH, self.PATH, self.SIZE)
+ self.PATH, 'qcow2', self.SIZE, backing_file=self.TEMPLATE_PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
@@ -576,7 +578,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@@ -615,7 +617,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
index f6e592231d..a005a6cf20 100644
--- a/nova/tests/unit/virt/libvirt/test_imagecache.py
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -18,8 +18,8 @@ import contextlib
import io
import os
import time
+from unittest import mock
-import mock
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import formatters
diff --git a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
index 42043ac495..08c54d02d3 100644
--- a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/virt/libvirt/test_migration.py b/nova/tests/unit/virt/libvirt/test_migration.py
index 70488f88cf..155c259986 100644
--- a/nova/tests/unit/virt/libvirt/test_migration.py
+++ b/nova/tests/unit/virt/libvirt/test_migration.py
@@ -15,9 +15,9 @@
from collections import deque
import copy
import textwrap
+from unittest import mock
from lxml import etree
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 4e73c662c5..0b80bde49f 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -18,9 +18,9 @@ import grp
import os
import pwd
import tempfile
+from unittest import mock
import ddt
-import mock
import os_traits
from oslo_config import cfg
from oslo_utils import fileutils
@@ -104,32 +104,60 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
@mock.patch('oslo_concurrency.processutils.execute')
- def test_create_image(self, mock_execute):
- libvirt_utils.create_image('raw', '/some/path', '10G')
- libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
- expected_args = [(('qemu-img', 'create', '-f', 'raw',
- '/some/path', '10G'),),
- (('qemu-img', 'create', '-f', 'qcow2',
- '/some/stuff', '1234567891234'),)]
- self.assertEqual(expected_args, mock_execute.call_args_list)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
- def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
- mock_execute.return_value = ('stdout', None)
+ def _test_create_image(
+ self, path, disk_format, disk_size, mock_info, mock_execute,
+ backing_file=None
+ ):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
- cluster_size=mock.sentinel.cluster_size)
- libvirt_utils.create_cow_image(mock.sentinel.backing_path,
- mock.sentinel.new_path)
- mock_info.assert_called_once_with(mock.sentinel.backing_path)
- mock_execute.assert_has_calls([mock.call(
- 'qemu-img', 'create', '-f', 'qcow2', '-o',
- 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
- mock.sentinel.backing_path, mock.sentinel.backing_fmt,
- mock.sentinel.cluster_size),
- mock.sentinel.new_path)])
+ cluster_size=mock.sentinel.cluster_size,
+ )
+
+ libvirt_utils.create_image(
+ path, disk_format, disk_size, backing_file=backing_file)
+
+ cow_opts = []
+
+ if backing_file is None:
+ mock_info.assert_not_called()
+ else:
+ mock_info.assert_called_once_with(backing_file)
+ cow_opts = [
+ '-o',
+ f'backing_file={mock.sentinel.backing_file},'
+ f'backing_fmt={mock.sentinel.backing_fmt},'
+ f'cluster_size={mock.sentinel.cluster_size}',
+ ]
+
+ expected_args = (
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
+ disk_format, *cow_opts, path,
+ )
+ if disk_size is not None:
+ expected_args += (disk_size,)
+
+ self.assertEqual([(expected_args,)], mock_execute.call_args_list)
+
+ def test_create_image_raw(self):
+ self._test_create_image('/some/path', 'raw', '10G')
+
+ def test_create_image_qcow2(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ )
+
+ def test_create_image_backing_file(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_size_none(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', None,
+ backing_file=mock.sentinel.backing_file,
+ )
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 4710f5226e..6d87ed727c 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -12,9 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
from lxml import etree
-import mock
import os_vif
from os_vif import exception as osv_exception
from os_vif import objects as osv_objects
@@ -517,18 +518,17 @@ class LibvirtVifTestCase(test.NoDBTestCase):
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
- self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False))
+ self.libvirt = self.useFixture(
+ nova_fixtures.LibvirtFixture(stub_os_vif=False))
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
# multiqueue configuration is host OS specific
- _a = mock.patch('os.uname')
- self.mock_uname = _a.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.10.13-200-generic', '', 'x86_64')
- self.addCleanup(_a.stop)
def _get_node(self, xml):
doc = etree.fromstring(xml)
@@ -983,14 +983,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.vif_bridge,
self.vif_bridge['network']['bridge'])
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- @mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
- @mock.patch('nova.privsep.linux_net.set_device_macaddr')
- @mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan')
- def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan,
- mock_set_macaddr, mock_get_vf_num,
- mock_get_ifname):
- mock_get_ifname.side_effect = ['eth1', 'eth13']
+ def _test_hw_veb_op(self, op, vlan):
+ self.libvirt.mock_get_vf_num_by_pci_address.return_value = 1
+ pci_utils.get_ifname_by_pci_address.side_effect = ['eth1', 'eth13']
vlan_id = int(vlan)
port_state = 'up' if vlan_id > 0 else 'down'
mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug'
@@ -1005,10 +1000,13 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'set_macaddr': [mock.call('eth13', mac, port_state=port_state)]
}
op(self.instance, self.vif_hw_veb_macvtap)
- mock_get_ifname.assert_has_calls(calls['get_ifname'])
- mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
- mock_set_macaddr.assert_has_calls(calls['set_macaddr'])
- mock_set_macaddr_and_vlan.assert_called_once_with(
+ pci_utils.get_ifname_by_pci_address.assert_has_calls(
+ calls['get_ifname'])
+ self.libvirt.mock_get_vf_num_by_pci_address.assert_has_calls(
+ calls['get_vf_num'])
+ self.libvirt.mock_set_device_macaddr.assert_has_calls(
+ calls['set_macaddr'])
+ self.libvirt.mock_set_device_macaddr_and_vlan.assert_called_once_with(
'eth1', 1, mock.ANY, vlan_id)
def test_plug_hw_veb(self):
@@ -1218,9 +1216,8 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='eth1')
- def test_hw_veb_driver_macvtap(self, mock_get_ifname):
+ def test_hw_veb_driver_macvtap(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'eth1'
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 89a59f2f1a..06065322f6 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -11,8 +11,8 @@
# under the License.
import platform
+from unittest import mock
-import mock
from os_brick.initiator import connector
from nova.objects import fields as obj_fields
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fs.py b/nova/tests/unit/virt/libvirt/volume/test_fs.py
index eaa6568999..5619dff589 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fs.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova import test
from nova import utils
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index f8a64abea5..bd516b1dd6 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import iscsi
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
index 554647acf4..8a85d73059 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_lightos.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import lightos
@@ -30,7 +30,7 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
device_scan_attempts=5)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
- new=mock.Mock(return_value=mock.Mock()))
+ new=mock.Mock())
def test_libvirt_lightos_driver_connect(self):
lightos_driver = lightos.LibvirtLightOSVolumeDriver(
self.fake_host)
@@ -40,15 +40,16 @@ class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
'name': 'aLightVolume',
'conf': config}
connection_info = {'data': disk_info}
- with mock.patch.object(lightos_driver.connector,
- 'connect_volume',
- return_value={'path': '/dev/dms1234567'}):
- lightos_driver.connect_volume(connection_info, None)
- (lightos_driver.connector.connect_volume.
- assert_called_once_with(
- connection_info['data']))
- self.assertEqual('/dev/dms1234567',
- connection_info['data']['device_path'])
+ lightos_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ lightos_driver.connect_volume(connection_info, None)
+
+ lightos_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567',
+ connection_info['data']['device_path'])
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_mount.py b/nova/tests/unit/virt/libvirt/volume/test_mount.py
index b618e090ba..8ecb117f05 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_mount.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_mount.py
@@ -15,10 +15,10 @@
import os.path
import threading
import time
+from unittest import mock
import eventlet
import fixtures
-import mock
from oslo_concurrency import processutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/virt/libvirt/volume/test_net.py b/nova/tests/unit/virt/libvirt/volume/test_net.py
index a694351629..8d8167b3d7 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_net.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_net.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.conf
from nova.tests.unit.virt.libvirt.volume import test_volume
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nfs.py b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
index 16c41f5387..a98efaac1c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.tests.unit.virt.libvirt.volume import test_mount
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index fcb303b4c3..3f593841fa 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import nvme
@@ -56,14 +56,15 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
'name': 'aNVMEVolume',
'conf': config}
connection_info = {'data': disk_info}
- with mock.patch.object(nvme_driver.connector,
- 'connect_volume',
- return_value={'path': '/dev/dms1234567'}):
- nvme_driver.connect_volume(connection_info, None)
- nvme_driver.connector.connect_volume.assert_called_once_with(
- connection_info['data'])
- self.assertEqual('/dev/dms1234567',
- connection_info['data']['device_path'])
+ nvme_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ nvme_driver.connect_volume(connection_info, None)
+
+ nvme_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567', connection_info['data']['device_path'])
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_quobyte.py b/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
index 8a0c647fc8..bb3c86083c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
@@ -16,9 +16,9 @@
import os
import traceback
+from unittest import mock
import ddt
-import mock
from oslo_concurrency import processutils
from oslo_utils import fileutils
import psutil
diff --git a/nova/tests/unit/virt/libvirt/volume/test_remotefs.py b/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
index 62060bcf1e..67c126c2b1 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index 6d9247cd2d..f0fcba1deb 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import scaleio
diff --git a/nova/tests/unit/virt/libvirt/volume/test_smbfs.py b/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
index 2c3ea574a9..0fba137740 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index e14954f148..678d4f8eb4 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -13,8 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import initiator
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import storpool as vol_sp
diff --git a/nova/tests/unit/virt/libvirt/volume/test_volume.py b/nova/tests/unit/virt/libvirt/volume/test_volume.py
index ac7bcf247d..9a3710a51d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_volume.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_volume.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 883cebb55a..168efee944 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from os_brick.initiator import connector
from nova import exception
diff --git a/nova/tests/unit/virt/powervm/__init__.py b/nova/tests/unit/virt/powervm/__init__.py
deleted file mode 100644
index 3f8ef7b167..0000000000
--- a/nova/tests/unit/virt/powervm/__init__.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-
-from oslo_utils.fixture import uuidsentinel
-
-from nova.compute import power_state
-from nova.compute import vm_states
-from nova import objects
-
-try:
- import powervm # noqa: F401
-except ImportError:
- raise unittest.SkipTest(
- "The 'pypowervm' dependency is not installed."
- )
-
-
-TEST_FLAVOR = objects.flavor.Flavor(
- memory_mb=2048,
- swap=0,
- vcpu_weight=None,
- root_gb=10, id=2,
- name=u'm1.small',
- ephemeral_gb=0,
- rxtx_factor=1.0,
- flavorid=uuidsentinel.flav_id,
- vcpus=1)
-
-TEST_INSTANCE = objects.Instance(
- id=1,
- uuid=uuidsentinel.inst_id,
- display_name='Fake Instance',
- root_gb=10,
- ephemeral_gb=0,
- instance_type_id=TEST_FLAVOR.id,
- system_metadata={'image_os_distro': 'rhel'},
- host='host1',
- flavor=TEST_FLAVOR,
- task_state=None,
- vm_state=vm_states.STOPPED,
- power_state=power_state.SHUTDOWN,
-)
-
-IMAGE1 = {
- 'id': uuidsentinel.img_id,
- 'name': 'image1',
- 'size': 300,
- 'container_format': 'bare',
- 'disk_format': 'raw',
- 'checksum': 'b518a8ba2b152b5607aceb5703fac072',
-}
-TEST_IMAGE1 = objects.image_meta.ImageMeta.from_dict(IMAGE1)
diff --git a/nova/tests/unit/virt/powervm/disk/__init__.py b/nova/tests/unit/virt/powervm/disk/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/disk/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/disk/fake_adapter.py b/nova/tests/unit/virt/powervm/disk/fake_adapter.py
deleted file mode 100644
index c0b4962e54..0000000000
--- a/nova/tests/unit/virt/powervm/disk/fake_adapter.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.virt.powervm.disk import driver as disk_dvr
-
-
-class FakeDiskAdapter(disk_dvr.DiskAdapter):
- """A fake subclass of DiskAdapter.
-
- This is done so that the abstract methods/properties can be stubbed and the
- class can be instantiated for testing.
- """
-
- def _vios_uuids(self):
- pass
-
- def _disk_match_func(self, disk_type, instance):
- pass
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- pass
-
- def capacity(self):
- pass
-
- def capacity_used(self):
- pass
-
- def detach_disk(self, instance):
- pass
-
- def delete_disks(self, storage_elems):
- pass
-
- def create_disk_from_image(self, context, instance, image_meta):
- pass
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- pass
diff --git a/nova/tests/unit/virt/powervm/disk/test_driver.py b/nova/tests/unit/virt/powervm/disk/test_driver.py
deleted file mode 100644
index c27825801f..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_driver.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import const as pvm_const
-
-from nova import test
-from nova.tests.unit.virt.powervm.disk import fake_adapter
-
-
-class TestDiskAdapter(test.NoDBTestCase):
- """Unit Tests for the generic storage driver."""
-
- def setUp(self):
- super(TestDiskAdapter, self).setUp()
-
- # Return the mgmt uuid
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid')).mock
- self.mgmt_uuid.return_value = 'mp_uuid'
-
- # The values (adapter and host uuid) are not used in the base.
- # Default them to None. We use the fake adapter here because we can't
- # instantiate DiskAdapter which is an abstract base class.
- self.st_adpt = fake_adapter.FakeDiskAdapter(None, None)
-
- @mock.patch("pypowervm.util.sanitize_file_name_for_api")
- def test_get_disk_name(self, mock_san):
- inst = mock.Mock()
- inst.configure_mock(name='a_name_that_is_longer_than_eight',
- uuid='01234567-abcd-abcd-abcd-123412341234')
-
- # Long
- self.assertEqual(mock_san.return_value,
- self.st_adpt._get_disk_name('type', inst))
- mock_san.assert_called_with(inst.name, prefix='type_',
- max_len=pvm_const.MaxLen.FILENAME_DEFAULT)
-
- mock_san.reset_mock()
-
- # Short
- self.assertEqual(mock_san.return_value,
- self.st_adpt._get_disk_name('type', inst, short=True))
- mock_san.assert_called_with('a_name_t_0123', prefix='t_',
- max_len=pvm_const.MaxLen.VDISK_NAME)
diff --git a/nova/tests/unit/virt/powervm/disk/test_localdisk.py b/nova/tests/unit/virt/powervm/disk/test_localdisk.py
deleted file mode 100644
index 25b8395bb2..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_localdisk.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-
-from nova import exception
-from nova import test
-from oslo_utils.fixture import uuidsentinel as uuids
-from pypowervm import const as pvm_const
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova.virt.powervm.disk import driver as disk_dvr
-from nova.virt.powervm.disk import localdisk
-
-
-class TestLocalDisk(test.NoDBTestCase):
- """Unit Tests for the LocalDisk storage driver."""
-
- def setUp(self):
- super(TestLocalDisk, self).setUp()
- self.adpt = mock.Mock()
-
- # The mock VIOS needs to have scsi_mappings as a list. Internals are
- # set by individual test cases as needed.
- smaps = [mock.Mock()]
- self.vio_wrap = mock.create_autospec(
- pvm_vios.VIOS, instance=True, scsi_mappings=smaps,
- uuid='vios-uuid')
-
- # Return the mgmt uuid.
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid', autospec=True)).mock
- self.mgmt_uuid.return_value = 'mgmt_uuid'
-
- self.pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
- self.pvm_uuid.return_value = 'pvm_uuid'
-
- # Set up for the mocks for the disk adapter.
- self.mock_find_vg = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.storage.find_vg', autospec=True)).mock
- self.vg_uuid = uuids.vg_uuid
- self.vg = mock.Mock(spec=pvm_stg.VG, uuid=self.vg_uuid)
- self.mock_find_vg.return_value = (self.vio_wrap, self.vg)
-
- self.flags(volume_group_name='fakevg', group='powervm')
-
- # Mock the feed tasks.
- self.mock_afs = self.useFixture(fixtures.MockPatch(
- 'pypowervm.utils.transaction.FeedTask.add_functor_subtask',
- autospec=True)).mock
- self.mock_wtsk = mock.create_autospec(
- pvm_tx.WrapperTask, instance=True)
- self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
- self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.mock_ftsk.configure_mock(
- wrapper_tasks={'vios-uuid': self.mock_wtsk})
-
- # Create the adapter.
- self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
-
- def test_init(self):
- # Localdisk adapter already initialized in setUp()
- # From super __init__()
- self.assertEqual(self.adpt, self.ld_adpt._adapter)
- self.assertEqual('host_uuid', self.ld_adpt._host_uuid)
- self.assertEqual('mgmt_uuid', self.ld_adpt.mp_uuid)
-
- # From LocalStorage __init__()
- self.assertEqual('fakevg', self.ld_adpt.vg_name)
- self.mock_find_vg.assert_called_once_with(self.adpt, 'fakevg')
- self.assertEqual('vios-uuid', self.ld_adpt._vios_uuid)
- self.assertEqual(self.vg_uuid, self.ld_adpt.vg_uuid)
- self.assertFalse(self.ld_adpt.capabilities['shared_storage'])
- self.assertFalse(self.ld_adpt.capabilities['has_imagecache'])
- self.assertFalse(self.ld_adpt.capabilities['snapshot'])
-
- # Assert snapshot capability is true if hosting I/O on mgmt partition.
- self.mgmt_uuid.return_value = 'vios-uuid'
- self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
- self.assertTrue(self.ld_adpt.capabilities['snapshot'])
-
- # Assert volume_group_name is required.
- self.flags(volume_group_name=None, group='powervm')
- self.assertRaises(exception.OptRequiredIfOtherOptValue,
- localdisk.LocalStorage, self.adpt, 'host_uuid')
-
- def test_vios_uuids(self):
- self.assertEqual(['vios-uuid'], self.ld_adpt._vios_uuids)
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
- def test_disk_match_func(self, mock_disk_name, mock_gen_match):
- mock_disk_name.return_value = 'disk_name'
- func = self.ld_adpt._disk_match_func('disk_type', 'instance')
- mock_disk_name.assert_called_once_with(
- 'disk_type', 'instance', short=True)
- mock_gen_match.assert_called_once_with(
- pvm_stg.VDisk, names=['disk_name'])
- self.assertEqual(mock_gen_match.return_value, func)
-
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
- def test_capacity(self, mock_vg):
- """Tests the capacity methods."""
- mock_vg.return_value = mock.Mock(
- capacity='5120', available_size='2048')
- self.assertEqual(5120.0, self.ld_adpt.capacity)
- self.assertEqual(3072.0, self.ld_adpt.capacity_used)
-
- @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
- def test_delete_disks(self, mock_vg, mock_rm_vg):
- self.ld_adpt.delete_disks('storage_elems')
- mock_vg.assert_called_once_with()
- mock_rm_vg.assert_called_once_with(
- mock_vg.return_value, vdisks='storage_elems')
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- def test_detach_disk(self, mock_match_fn, mock_rm_maps, mock_vios):
- mock_match_fn.return_value = 'match_func'
- mock_vios.return_value = self.vio_wrap
- mock_map1 = mock.Mock(backing_storage='back_stor1')
- mock_map2 = mock.Mock(backing_storage='back_stor2')
- mock_rm_maps.return_value = [mock_map1, mock_map2]
-
- back_stores = self.ld_adpt.detach_disk('instance')
-
- self.assertEqual(['back_stor1', 'back_stor2'], back_stores)
- mock_match_fn.assert_called_once_with(pvm_stg.VDisk)
- mock_vios.assert_called_once_with(
- self.ld_adpt._adapter, uuid='vios-uuid',
- xag=[pvm_const.XAG.VIO_SMAP])
- mock_rm_maps.assert_called_with(self.vio_wrap, 'pvm_uuid',
- match_func=mock_match_fn.return_value)
- mock_vios.return_value.update.assert_called_once()
-
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_vdisk_mapping',
- autospec=True)
- def test_disconnect_disk_from_mgmt(self, mock_rm_vdisk_map):
- self.ld_adpt.disconnect_disk_from_mgmt('vios-uuid', 'disk_name')
- mock_rm_vdisk_map.assert_called_with(
- self.ld_adpt._adapter, 'vios-uuid', 'mgmt_uuid',
- disk_names=['disk_name'])
-
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._upload_image')
- def test_create_disk_from_image(self, mock_upload_image):
- mock_image_meta = mock.Mock()
- mock_image_meta.size = 30
- mock_upload_image.return_value = 'mock_img'
-
- self.ld_adpt.create_disk_from_image(
- 'context', 'instance', mock_image_meta)
-
- mock_upload_image.assert_called_once_with(
- 'context', 'instance', mock_image_meta)
-
- @mock.patch('nova.image.glance.API.download')
- @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter')
- @mock.patch('pypowervm.tasks.storage.upload_new_vdisk')
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
- def test_upload_image(self, mock_name, mock_upload, mock_iter, mock_dl):
- mock_meta = mock.Mock(id='1', size=1073741824, disk_format='raw')
- mock_upload.return_value = ['mock_img']
-
- mock_img = self.ld_adpt._upload_image('context', 'inst', mock_meta)
-
- self.assertEqual('mock_img', mock_img)
- mock_name.assert_called_once_with(
- disk_dvr.DiskType.BOOT, 'inst', short=True)
- mock_dl.assert_called_once_with('context', '1')
- mock_iter.assert_called_once_with(mock_dl.return_value)
- mock_upload.assert_called_once_with(
- self.adpt, 'vios-uuid', self.vg_uuid, mock_iter.return_value,
- mock_name.return_value, 1073741824, d_size=1073741824,
- upload_type=tsk_stg.UploadType.IO_STREAM, file_format='raw')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- def test_attach_disk(self, mock_bldmap, mock_addmap):
- def test_afs(add_func):
- # Verify the internal add_func
- self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
- mock_bldmap.assert_called_once_with(
- self.ld_adpt._host_uuid, self.vio_wrap, 'pvm_uuid',
- 'disk_info')
- mock_addmap.assert_called_once_with(
- self.vio_wrap, mock_bldmap.return_value)
-
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
- self.ld_adpt.attach_disk('instance', 'disk_info', self.mock_ftsk)
- self.pvm_uuid.assert_called_once_with('instance')
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
-
- @mock.patch('pypowervm.wrappers.storage.VG.get')
- def test_get_vg_wrap(self, mock_vg):
- vg_wrap = self.ld_adpt._get_vg_wrap()
- self.assertEqual(mock_vg.return_value, vg_wrap)
- mock_vg.assert_called_once_with(
- self.adpt, uuid=self.vg_uuid, parent_type=pvm_vios.VIOS,
- parent_uuid='vios-uuid')
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage.'
- '_disk_match_func')
- def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
- mock_vios):
- mock_vios.return_value = self.vio_wrap
-
- # No maps found
- mock_findmaps.return_value = None
- devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
- self.pvm_uuid.assert_called_once_with('inst')
- mock_match_fn.assert_called_once_with(disk_dvr.DiskType.BOOT, 'inst')
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id='pvm_uuid',
- match_func=mock_match_fn.return_value)
- self.assertIsNone(devname)
-
- # Good map
- mock_lu = mock.Mock()
- mock_lu.server_adapter.backing_dev_name = 'devname'
- mock_findmaps.return_value = [mock_lu]
- devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
- self.assertEqual('devname', devname)
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps')
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.wrappers.storage.VG.get', new=mock.Mock())
- def test_get_bootdisk_iter(self, mock_vios, mock_find_maps, mock_lw):
- inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
-
- # Good path
- mock_vios.return_value = vios1
- for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
- self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
- self.assertEqual(vios1.uuid, vios.uuid)
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
-
- # Not found, no storage of that name.
- mock_vios.reset_mock()
- mock_find_maps.return_value = []
- for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
- self.fail('Should not have found any storage elements.')
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
-
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_bootdisk_iter',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
- def test_connect_instance_disk_to_mgmt(self, mock_add, mock_lw, mock_iter):
- inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
-
- # Good path
- mock_iter.return_value = [(vios1.scsi_mappings[0].backing_storage,
- vios1)]
- vdisk, vios = self.ld_adpt.connect_instance_disk_to_mgmt(inst)
- self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
- self.assertIs(vios1, vios)
- self.assertEqual(1, mock_add.call_count)
- mock_add.assert_called_with('host_uuid', vios, 'mgmt_uuid', vdisk)
-
- # add_vscsi_mapping raises. Show-stopper since only one VIOS.
- mock_add.reset_mock()
- mock_add.side_effect = Exception
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ld_adpt.connect_instance_disk_to_mgmt, inst)
- self.assertEqual(1, mock_add.call_count)
-
- # Not found
- mock_add.reset_mock()
- mock_iter.return_value = []
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ld_adpt.connect_instance_disk_to_mgmt, inst)
- self.assertFalse(mock_add.called)
-
- def _bld_mocks_for_instance_disk(self):
- inst = mock.Mock()
- inst.name = 'Name Of Instance'
- inst.uuid = uuids.inst_uuid
- lpar_wrap = mock.Mock()
- lpar_wrap.id = 2
- vios1 = self.vio_wrap
- back_stor_name = 'b_Name_Of__' + inst.uuid[:4]
- vios1.scsi_mappings[0].backing_storage.name = back_stor_name
- return inst, lpar_wrap, vios1
diff --git a/nova/tests/unit/virt/powervm/disk/test_ssp.py b/nova/tests/unit/virt/powervm/disk/test_ssp.py
deleted file mode 100644
index 86705dc29b..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_ssp.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils import uuidutils
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import cluster as pvm_clust
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm.disk import ssp as ssp_dvr
-from nova.virt.powervm import vm
-
-FAKE_INST_UUID = uuidutils.generate_uuid(dashed=True)
-FAKE_INST_UUID_PVM = vm.get_pvm_uuid(mock.Mock(uuid=FAKE_INST_UUID))
-
-
-class TestSSPDiskAdapter(test.NoDBTestCase):
- """Unit Tests for the LocalDisk storage driver."""
-
- def setUp(self):
- super(TestSSPDiskAdapter, self).setUp()
-
- self.inst = powervm.TEST_INSTANCE
-
- self.apt = mock.Mock()
- self.host_uuid = 'host_uuid'
-
- self.ssp_wrap = mock.create_autospec(pvm_stg.SSP, instance=True)
-
- # SSP.refresh() returns itself
- self.ssp_wrap.refresh.return_value = self.ssp_wrap
- self.node1 = mock.create_autospec(pvm_clust.Node, instance=True)
- self.node2 = mock.create_autospec(pvm_clust.Node, instance=True)
- self.clust_wrap = mock.create_autospec(
- pvm_clust.Cluster, instance=True)
- self.clust_wrap.nodes = [self.node1, self.node2]
- self.clust_wrap.refresh.return_value = self.clust_wrap
- self.tier_wrap = mock.create_autospec(pvm_stg.Tier, instance=True)
- # Tier.refresh() returns itself
- self.tier_wrap.refresh.return_value = self.tier_wrap
- self.vio_wrap = mock.create_autospec(pvm_vios.VIOS, instance=True)
-
- # For _cluster
- self.mock_clust = self.useFixture(fixtures.MockPatch(
- 'pypowervm.wrappers.cluster.Cluster', autospec=True)).mock
- self.mock_clust.get.return_value = [self.clust_wrap]
-
- # For _ssp
- self.mock_ssp_gbhref = self.useFixture(fixtures.MockPatch(
- 'pypowervm.wrappers.storage.SSP.get_by_href')).mock
- self.mock_ssp_gbhref.return_value = self.ssp_wrap
-
- # For _tier
- self.mock_get_tier = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.storage.default_tier_for_ssp',
- autospec=True)).mock
- self.mock_get_tier.return_value = self.tier_wrap
-
- # A FeedTask
- self.mock_wtsk = mock.create_autospec(
- pvm_tx.WrapperTask, instance=True)
- self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
- self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.mock_afs = self.mock_ftsk.add_functor_subtask
- self.mock_ftsk.configure_mock(
- wrapper_tasks={self.vio_wrap.uuid: self.mock_wtsk})
-
- self.pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
-
- # Return the mgmt uuid
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid')).mock
- self.mgmt_uuid.return_value = 'mp_uuid'
-
- # The SSP disk adapter
- self.ssp_drv = ssp_dvr.SSPDiskAdapter(self.apt, self.host_uuid)
-
- def test_init(self):
- self.assertEqual(self.apt, self.ssp_drv._adapter)
- self.assertEqual(self.host_uuid, self.ssp_drv._host_uuid)
- self.mock_clust.get.assert_called_once_with(self.apt)
- self.assertEqual(self.mock_clust.get.return_value,
- [self.ssp_drv._clust])
- self.mock_ssp_gbhref.assert_called_once_with(
- self.apt, self.clust_wrap.ssp_uri)
- self.assertEqual(self.mock_ssp_gbhref.return_value, self.ssp_drv._ssp)
- self.mock_get_tier.assert_called_once_with(self.ssp_wrap)
- self.assertEqual(self.mock_get_tier.return_value, self.ssp_drv._tier)
-
- def test_init_error(self):
- # Do these in reverse order to verify we trap all of 'em
- for raiser in (self.mock_get_tier, self.mock_ssp_gbhref,
- self.mock_clust.get):
- raiser.side_effect = pvm_exc.TimeoutError("timed out")
- self.assertRaises(exception.NotFound,
- ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
- raiser.side_effect = ValueError
- self.assertRaises(ValueError,
- ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
-
- def test_capabilities(self):
- self.assertTrue(self.ssp_drv.capabilities.get('shared_storage'))
- self.assertFalse(self.ssp_drv.capabilities.get('has_imagecache'))
- self.assertTrue(self.ssp_drv.capabilities.get('snapshot'))
-
- @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
- def test_vios_uuids(self, mock_rpu):
- mock_rpu.return_value = self.host_uuid
- vios_uuids = self.ssp_drv._vios_uuids
- self.assertEqual({self.node1.vios_uuid, self.node2.vios_uuid},
- set(vios_uuids))
- mock_rpu.assert_has_calls(
- [mock.call(node.vios_uri, preserve_case=True, root=True)
- for node in [self.node1, self.node2]])
-
- mock_rpu.reset_mock()
-
- # Test VIOSes on other nodes, which won't have uuid or url
- node1 = mock.Mock(vios_uuid=None, vios_uri='uri1')
- node2 = mock.Mock(vios_uuid='2', vios_uri=None)
- # This mock is good and should be returned
- node3 = mock.Mock(vios_uuid='3', vios_uri='uri3')
- self.clust_wrap.nodes = [node1, node2, node3]
- self.assertEqual(['3'], self.ssp_drv._vios_uuids)
- # get_req_path_uuid was only called on the good one
- mock_rpu.assert_called_once_with('uri3', preserve_case=True, root=True)
-
- def test_capacity(self):
- self.tier_wrap.capacity = 10
- self.assertAlmostEqual(10.0, self.ssp_drv.capacity)
- self.tier_wrap.refresh.assert_called_once_with()
-
- def test_capacity_used(self):
- self.ssp_wrap.capacity = 4.56
- self.ssp_wrap.free_space = 1.23
- self.assertAlmostEqual((4.56 - 1.23), self.ssp_drv.capacity_used)
- self.ssp_wrap.refresh.assert_called_once_with()
-
- @mock.patch('pypowervm.tasks.cluster_ssp.get_or_upload_image_lu',
- autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.util.sanitize_file_name_for_api', autospec=True)
- @mock.patch('pypowervm.tasks.storage.crt_lu', autospec=True)
- @mock.patch('nova.image.glance.API.download')
- @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter',
- autospec=True)
- def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu,
- mock_san, mock_vuuid, mock_goru):
- img = powervm.TEST_IMAGE1
-
- mock_crt_lu.return_value = self.ssp_drv._ssp, 'boot_lu'
- mock_san.return_value = 'disk_name'
- mock_vuuid.return_value = ['vuuid']
-
- self.assertEqual('boot_lu', self.ssp_drv.create_disk_from_image(
- 'context', self.inst, img))
- mock_dl.assert_called_once_with('context', img.id)
- mock_san.assert_has_calls([
- mock.call(img.name, prefix='image_', suffix='_' + img.checksum),
- mock.call(self.inst.name, prefix='boot_')])
- mock_it2f.assert_called_once_with(mock_dl.return_value)
- mock_goru.assert_called_once_with(
- self.ssp_drv._tier, 'disk_name', 'vuuid',
- mock_it2f.return_value, img.size,
- upload_type=tsk_stg.UploadType.IO_STREAM)
- mock_crt_lu.assert_called_once_with(
- self.mock_get_tier.return_value, mock_san.return_value,
- self.inst.flavor.root_gb, typ=pvm_stg.LUType.DISK,
- clone=mock_goru.return_value)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- @mock.patch('pypowervm.wrappers.storage.LU', autospec=True)
- def test_connect_disk(self, mock_lu, mock_bldmap, mock_addmap,
- mock_vio_uuids):
- disk_info = mock.Mock()
- disk_info.configure_mock(name='dname', udid='dudid')
- mock_vio_uuids.return_value = [self.vio_wrap.uuid]
-
- def test_afs(add_func):
- # Verify the internal add_func
- self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
- mock_bldmap.assert_called_once_with(
- self.host_uuid, self.vio_wrap, self.pvm_uuid.return_value,
- mock_lu.bld_ref.return_value)
- mock_addmap.assert_called_once_with(
- self.vio_wrap, mock_bldmap.return_value)
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
-
- self.ssp_drv.attach_disk(self.inst, disk_info, self.mock_ftsk)
- mock_lu.bld_ref.assert_called_once_with(self.apt, 'dname', 'dudid')
- self.pvm_uuid.assert_called_once_with(self.inst)
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
-
- @mock.patch('pypowervm.tasks.storage.rm_tier_storage', autospec=True)
- def test_delete_disks(self, mock_rm_tstor):
- self.ssp_drv.delete_disks(['disk1', 'disk2'])
- mock_rm_tstor.assert_called_once_with(['disk1', 'disk2'],
- tier=self.ssp_drv._tier)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- def test_disconnect_disk(self, mock_bld_ftsk, mock_gmf, mock_rmmaps,
- mock_findmaps, mock_vio_uuids):
- mock_vio_uuids.return_value = [self.vio_wrap.uuid]
- mock_bld_ftsk.return_value = self.mock_ftsk
- lu1, lu2 = [mock.create_autospec(pvm_stg.LU, instance=True)] * 2
- # Two mappings have the same LU, to verify set behavior
- mock_findmaps.return_value = [
- mock.Mock(spec=pvm_vios.VSCSIMapping, backing_storage=lu)
- for lu in (lu1, lu2, lu1)]
-
- def test_afs(rm_func):
- # verify the internal rm_func
- self.assertEqual(mock_rmmaps.return_value, rm_func(self.vio_wrap))
- mock_rmmaps.assert_called_once_with(
- self.vio_wrap, self.pvm_uuid.return_value,
- match_func=mock_gmf.return_value)
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
-
- self.assertEqual(
- {lu1, lu2}, set(self.ssp_drv.detach_disk(self.inst)))
- mock_bld_ftsk.assert_called_once_with(
- self.apt, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
- self.pvm_uuid.assert_called_once_with(self.inst)
- mock_gmf.assert_called_once_with(pvm_stg.LU)
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id=self.pvm_uuid.return_value,
- match_func=mock_gmf.return_value)
- self.mock_ftsk.execute.assert_called_once_with()
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._disk_match_func')
- def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
- mock_vios):
- mock_vios.return_value = self.vio_wrap
-
- # No maps found
- mock_findmaps.return_value = None
- devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
- mock_vios.assert_called_once_with(
- self.apt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id=self.pvm_uuid.return_value,
- match_func=mock_match_fn.return_value)
- self.assertIsNone(devname)
-
- # Good map
- mock_lu = mock.Mock()
- mock_lu.server_adapter.backing_dev_name = 'devname'
- mock_findmaps.return_value = [mock_lu]
- devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
- self.assertEqual('devname', devname)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
- '_vios_uuids', new_callable=mock.PropertyMock)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
- def test_connect_instance_disk_to_mgmt(self, mock_add, mock_vio_get,
- mock_lw, mock_vio_uuids):
- inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
- mock_vio_uuids.return_value = [1, 2]
-
- # Test with two VIOSes, both of which contain the mapping
- mock_vio_get.side_effect = [vio1, vio2]
- lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
- self.assertEqual('lu_udid', lu.udid)
- # Should hit on the first VIOS
- self.assertIs(vio1, vios)
- mock_add.assert_called_once_with(self.host_uuid, vio1, 'mp_uuid', lu)
-
- # Now the first VIOS doesn't have the mapping, but the second does
- mock_add.reset_mock()
- mock_vio_get.side_effect = [vio3, vio2]
- lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
- self.assertEqual('lu_udid', lu.udid)
- # Should hit on the second VIOS
- self.assertIs(vio2, vios)
- self.assertEqual(1, mock_add.call_count)
- mock_add.assert_called_once_with(self.host_uuid, vio2, 'mp_uuid', lu)
-
- # No hits
- mock_add.reset_mock()
- mock_vio_get.side_effect = [vio3, vio3]
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ssp_drv.connect_instance_disk_to_mgmt, inst)
- self.assertEqual(0, mock_add.call_count)
-
- # First add_vscsi_mapping call raises
- mock_vio_get.side_effect = [vio1, vio2]
- mock_add.side_effect = [Exception("mapping failed"), None]
- # Should hit on the second VIOS
- self.assertIs(vio2, vios)
-
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping', autospec=True)
- def test_disconnect_disk_from_mgmt(self, mock_rm_lu_map):
- self.ssp_drv.disconnect_disk_from_mgmt('vios_uuid', 'disk_name')
- mock_rm_lu_map.assert_called_with(self.apt, 'vios_uuid',
- 'mp_uuid', disk_names=['disk_name'])
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._get_disk_name')
- def test_disk_match_func(self, mock_disk_name, mock_gen_match):
- mock_disk_name.return_value = 'disk_name'
- self.ssp_drv._disk_match_func('disk_type', 'instance')
- mock_disk_name.assert_called_once_with('disk_type', 'instance')
- mock_gen_match.assert_called_with(pvm_stg.LU, names=['disk_name'])
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
- '_vios_uuids', new_callable=mock.PropertyMock)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
- inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
- mock_vio_uuids.return_value = [1, 2]
-
- # Test with two VIOSes, both of which contain the mapping. Force the
- # method to get the lpar_wrap.
- mock_vio_get.side_effect = [vio1, vio2]
- idi = self.ssp_drv._get_bootdisk_iter(inst)
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios1', vios.name)
- mock_vio_get.assert_called_once_with(self.apt, uuid=1,
- xag=[pvm_const.XAG.VIO_SMAP])
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios2', vios.name)
- mock_vio_get.assert_called_with(self.apt, uuid=2,
- xag=[pvm_const.XAG.VIO_SMAP])
- self.assertRaises(StopIteration, next, idi)
- self.assertEqual(2, mock_vio_get.call_count)
- mock_lw.assert_called_once_with(self.apt, inst)
-
- # Same, but prove that breaking out of the loop early avoids the second
- # get call. Supply lpar_wrap from here on, and prove no calls to
- # get_instance_wrapper
- mock_vio_get.reset_mock()
- mock_lw.reset_mock()
- mock_vio_get.side_effect = [vio1, vio2]
- for lu, vios in self.ssp_drv._get_bootdisk_iter(inst):
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios1', vios.name)
- break
- mock_vio_get.assert_called_once_with(self.apt, uuid=1,
- xag=[pvm_const.XAG.VIO_SMAP])
-
- # Now the first VIOS doesn't have the mapping, but the second does
- mock_vio_get.reset_mock()
- mock_vio_get.side_effect = [vio3, vio2]
- idi = self.ssp_drv._get_bootdisk_iter(inst)
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios2', vios.name)
- mock_vio_get.assert_has_calls(
- [mock.call(self.apt, uuid=uuid, xag=[pvm_const.XAG.VIO_SMAP])
- for uuid in (1, 2)])
- self.assertRaises(StopIteration, next, idi)
- self.assertEqual(2, mock_vio_get.call_count)
-
- # No hits
- mock_vio_get.reset_mock()
- mock_vio_get.side_effect = [vio3, vio3]
- self.assertEqual([], list(self.ssp_drv._get_bootdisk_iter(inst)))
- self.assertEqual(2, mock_vio_get.call_count)
-
- def _bld_mocks_for_instance_disk(self):
- inst = mock.Mock()
- inst.name = 'my-instance-name'
- lpar_wrap = mock.Mock()
- lpar_wrap.id = 4
- lu_wrap = mock.Mock(spec=pvm_stg.LU)
- lu_wrap.configure_mock(name='boot_my_instance_name', udid='lu_udid')
- smap = mock.Mock(backing_storage=lu_wrap,
- server_adapter=mock.Mock(lpar_id=4))
- # Build mock VIOS Wrappers as the returns from VIOS.wrap.
- # vios1 and vios2 will both have the mapping for client ID 4 and LU
- # named boot_my_instance_name.
- smaps = [mock.Mock(), mock.Mock(), mock.Mock(), smap]
- vios1 = mock.Mock(spec=pvm_vios.VIOS)
- vios1.configure_mock(name='vios1', uuid='uuid1', scsi_mappings=smaps)
- vios2 = mock.Mock(spec=pvm_vios.VIOS)
- vios2.configure_mock(name='vios2', uuid='uuid2', scsi_mappings=smaps)
- # vios3 will not have the mapping
- vios3 = mock.Mock(spec=pvm_vios.VIOS)
- vios3.configure_mock(name='vios3', uuid='uuid3',
- scsi_mappings=[mock.Mock(), mock.Mock()])
- return inst, lpar_wrap, vios1, vios2, vios3
diff --git a/nova/tests/unit/virt/powervm/tasks/__init__.py b/nova/tests/unit/virt/powervm/tasks/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/tasks/test_image.py b/nova/tests/unit/virt/powervm/tasks/test_image.py
deleted file mode 100644
index b9e3560a16..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_image.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-
-from nova.virt.powervm.tasks import image as tsk_img
-
-
-class TestImage(test.TestCase):
- def test_update_task_state(self):
- def func(task_state, expected_state='delirious'):
- self.assertEqual('task_state', task_state)
- self.assertEqual('delirious', expected_state)
- tf = tsk_img.UpdateTaskState(func, 'task_state')
- self.assertEqual('update_task_state_task_state', tf.name)
- tf.execute()
-
- def func2(task_state, expected_state=None):
- self.assertEqual('task_state', task_state)
- self.assertEqual('expected_state', expected_state)
- tf = tsk_img.UpdateTaskState(func2, 'task_state',
- expected_state='expected_state')
- tf.execute()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tsk_img.UpdateTaskState(func, 'task_state')
- tf.assert_called_once_with(
- name='update_task_state_task_state')
-
- @mock.patch('nova.virt.powervm.image.stream_blockdev_to_glance',
- autospec=True)
- @mock.patch('nova.virt.powervm.image.generate_snapshot_metadata',
- autospec=True)
- def test_stream_to_glance(self, mock_metadata, mock_stream):
- mock_metadata.return_value = 'metadata'
- mock_inst = mock.Mock()
- mock_inst.name = 'instance_name'
- tf = tsk_img.StreamToGlance('context', 'image_api', 'image_id',
- mock_inst)
- self.assertEqual('stream_to_glance', tf.name)
- tf.execute('disk_path')
- mock_metadata.assert_called_with('context', 'image_api', 'image_id',
- mock_inst)
- mock_stream.assert_called_with('context', 'image_api', 'image_id',
- 'metadata', 'disk_path')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tsk_img.StreamToGlance(
- 'context', 'image_api', 'image_id', mock_inst)
- tf.assert_called_once_with(
- name='stream_to_glance', requires='disk_path')
diff --git a/nova/tests/unit/virt/powervm/tasks/test_network.py b/nova/tests/unit/virt/powervm/tasks/test_network.py
deleted file mode 100644
index 9d6951eceb..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_network.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import eventlet
-import mock
-from pypowervm.wrappers import network as pvm_net
-
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm.tasks import network as tf_net
-
-
-def cna(mac):
- """Builds a mock Client Network Adapter for unit tests."""
- return mock.MagicMock(mac=mac, vswitch_uri='fake_href')
-
-
-class TestNetwork(test.NoDBTestCase):
- def setUp(self):
- super(TestNetwork, self).setUp()
- self.flags(host='host1')
- self.apt = mock.Mock()
-
- self.mock_lpar_wrap = mock.MagicMock()
- self.mock_lpar_wrap.can_modify_io.return_value = True, None
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('nova.virt.powervm.vif.unplug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug_vifs(self, mock_vm_get, mock_unplug, mock_get_wrap):
- """Tests that a delete of the vif can be done."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA responses.
- cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
- mock_vm_get.return_value = cnas
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
- {'address': 'aa:bb:cc:dd:ee:33'}
- ]
-
- # Mock out the instance wrapper
- mock_get_wrap.return_value = self.mock_lpar_wrap
-
- # Mock out the vif driver
- def validate_unplug(adapter, instance, vif, cna_w_list=None):
- self.assertEqual(adapter, self.apt)
- self.assertEqual(instance, inst)
- self.assertIn(vif, net_info)
- self.assertEqual(cna_w_list, cnas)
-
- mock_unplug.side_effect = validate_unplug
-
- # Run method
- p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info)
- p_vifs.execute()
-
- # Make sure the unplug was invoked, so that we know that the validation
- # code was called
- self.assertEqual(3, mock_unplug.call_count)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.UnplugVifs(self.apt, inst, net_info)
- tf.assert_called_once_with(name='unplug_vifs')
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_unplug_vifs_invalid_state(self, mock_get_wrap):
- """Tests that the delete raises an exception if bad VM state."""
- inst = powervm.TEST_INSTANCE
-
- # Mock out the instance wrapper
- mock_get_wrap.return_value = self.mock_lpar_wrap
-
- # Mock that the state is incorrect
- self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
-
- # Run method
- p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock())
- self.assertRaises(exception.VirtualInterfaceUnplugException,
- p_vifs.execute)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_rmc(self, mock_cna_get, mock_plug):
- """Tests that a crt vif can be done with secure RMC."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. One should already exist, the other
- # should not.
- pre_cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
- mock_cna_get.return_value = copy.deepcopy(pre_cnas)
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
- ]
-
- # First run the CNA update, then the CNA create.
- mock_new_cna = mock.Mock(spec=pvm_net.CNA)
- mock_plug.side_effect = ['upd_cna', mock_new_cna]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
-
- all_cnas = p_vifs.execute(self.mock_lpar_wrap)
-
- # new vif should be created twice.
- mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
- mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=True)
-
- # The Task provides the list of original CNAs plus only CNAs that were
- # created.
- self.assertEqual(pre_cnas + [mock_new_cna], all_cnas)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- tf.assert_called_once_with(
- name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_plug):
- """Verifies if no creates are needed, none are done."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Both should already exist.
- mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case. This also validates that we don't call
- # get_vnics if no nets have vnic_type 'direct'.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:11', 'vnic_type': 'normal'}
- ]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.execute(self.mock_lpar_wrap)
-
- # The create should have been called with new_vif as False.
- mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
- mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=False)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_invalid_state(self, mock_vm_get, mock_plug):
- """Tests that a crt_vif fails when the LPAR state is bad."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Only doing one for simplicity
- mock_vm_get.return_value = []
- net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
-
- # Mock that the state is incorrect
- self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- self.assertRaises(exception.VirtualInterfaceCreateException,
- p_vifs.execute, self.mock_lpar_wrap)
-
- # The create should not have been invoked
- self.assertEqual(0, mock_plug.call_count)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_timeout(self, mock_vm_get, mock_plug):
- """Tests that crt vif failure via loss of neutron callback."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Only doing one for simplicity
- mock_vm_get.return_value = [cna('AABBCCDDEE11')]
-
- # Mock up the network info.
- net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
-
- # Ensure that an exception is raised by a timeout.
- mock_plug.side_effect = eventlet.timeout.Timeout()
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- self.assertRaises(exception.VirtualInterfaceCreateException,
- p_vifs.execute, self.mock_lpar_wrap)
-
- # The create should have only been called once.
- self.assertEqual(1, mock_plug.call_count)
-
- @mock.patch('nova.virt.powervm.vif.unplug')
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_revert(self, mock_vm_get, mock_plug, mock_unplug):
- """Tests that the revert flow works properly."""
- inst = powervm.TEST_INSTANCE
-
- # Fake CNA list. The one pre-existing VIF should *not* get reverted.
- cna_list = [cna('AABBCCDDEEFF'), cna('FFEEDDCCBBAA')]
- mock_vm_get.return_value = cna_list
-
- # Mock up the network info. Three roll backs.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'normal'}
- ]
-
- # Make sure we test raising an exception
- mock_unplug.side_effect = [exception.NovaException(), None]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.execute(self.mock_lpar_wrap)
- p_vifs.revert(self.mock_lpar_wrap, mock.Mock(), mock.Mock())
-
- # The unplug should be called twice. The exception shouldn't stop the
- # second call.
- self.assertEqual(2, mock_unplug.call_count)
-
- # Make sure each call is invoked correctly. The first plug was not a
- # new vif, so it should not be reverted.
- c2 = mock.call(self.apt, inst, net_info[1], cna_w_list=cna_list)
- c3 = mock.call(self.apt, inst, net_info[2], cna_w_list=cna_list)
- mock_unplug.assert_has_calls([c2, c3])
-
- @mock.patch('pypowervm.tasks.cna.crt_cna')
- @mock.patch('pypowervm.wrappers.network.VSwitch.search')
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_mgmt_vif(self, mock_vm_get, mock_plug, mock_vs_search,
- mock_crt_cna):
- """Tests that a mgmt vif can be created."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the rmc vswitch
- vswitch_w = mock.MagicMock()
- vswitch_w.href = 'fake_mgmt_uri'
- mock_vs_search.return_value = [vswitch_w]
-
- # Run method such that it triggers a fresh CNA search
- p_vifs = tf_net.PlugMgmtVif(self.apt, inst)
- p_vifs.execute(None)
-
- # With the default get_cnas mock (which returns a Mock()), we think we
- # found an existing management CNA.
- mock_crt_cna.assert_not_called()
- mock_vm_get.assert_called_once_with(
- self.apt, inst, vswitch_uri='fake_mgmt_uri')
-
- # Now mock get_cnas to return no hits
- mock_vm_get.reset_mock()
- mock_vm_get.return_value = []
- p_vifs.execute(None)
-
- # Get was called; and since it didn't have the mgmt CNA, so was plug.
- self.assertEqual(1, mock_crt_cna.call_count)
- mock_vm_get.assert_called_once_with(
- self.apt, inst, vswitch_uri='fake_mgmt_uri')
-
- # Now pass CNAs, but not the mgmt vif, "from PlugVifs"
- cnas = [mock.Mock(vswitch_uri='uri1'), mock.Mock(vswitch_uri='uri2')]
- mock_crt_cna.reset_mock()
- mock_vm_get.reset_mock()
- p_vifs.execute(cnas)
-
- # Get wasn't called, since the CNAs were passed "from PlugVifs"; but
- # since the mgmt vif wasn't included, plug was called.
- mock_vm_get.assert_not_called()
- mock_crt_cna.assert_called()
-
- # Finally, pass CNAs including the mgmt.
- cnas.append(mock.Mock(vswitch_uri='fake_mgmt_uri'))
- mock_crt_cna.reset_mock()
- p_vifs.execute(cnas)
-
- # Neither get nor plug was called.
- mock_vm_get.assert_not_called()
- mock_crt_cna.assert_not_called()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.PlugMgmtVif(self.apt, inst)
- tf.assert_called_once_with(
- name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
-
- def test_get_vif_events(self):
- # Set up common mocks.
- inst = powervm.TEST_INSTANCE
- net_info = [mock.MagicMock(), mock.MagicMock()]
- net_info[0]['id'] = 'a'
- net_info[0].get.return_value = False
- net_info[1]['id'] = 'b'
- net_info[1].get.return_value = True
-
- # Set up the runner.
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.crt_network_infos = net_info
- resp = p_vifs._get_vif_events()
-
- # Only one should be returned since only one was active.
- self.assertEqual(1, len(resp))
diff --git a/nova/tests/unit/virt/powervm/tasks/test_storage.py b/nova/tests/unit/virt/powervm/tasks/test_storage.py
deleted file mode 100644
index 39fe9dec72..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_storage.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import exceptions as pvm_exc
-
-from nova import exception
-from nova import test
-from nova.virt.powervm.tasks import storage as tf_stg
-
-
-class TestStorage(test.NoDBTestCase):
-
- def setUp(self):
- super(TestStorage, self).setUp()
-
- self.adapter = mock.Mock()
- self.disk_dvr = mock.MagicMock()
- self.mock_cfg_drv = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.media.ConfigDrivePowerVM')).mock
- self.mock_mb = self.mock_cfg_drv.return_value
- self.instance = mock.MagicMock()
- self.context = 'context'
-
- def test_create_and_connect_cfg_drive(self):
- # With a specified FeedTask
- task = tf_stg.CreateAndConnectCfgDrive(
- self.adapter, self.instance, 'injected_files',
- 'network_info', 'stg_ftsk', admin_pass='admin_pass')
- task.execute('mgmt_cna')
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.create_cfg_drv_vopt.assert_called_once_with(
- self.instance, 'injected_files', 'network_info', 'stg_ftsk',
- admin_pass='admin_pass', mgmt_cna='mgmt_cna')
-
- # Normal revert
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_called_once_with(self.instance,
- 'stg_ftsk')
-
- self.mock_mb.reset_mock()
-
- # Revert when dlt_vopt fails
- self.mock_mb.dlt_vopt.side_effect = pvm_exc.Error('fake-exc')
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_called_once()
-
- self.mock_mb.reset_mock()
-
- # Revert when media builder not created
- task.mb = None
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.assert_not_called()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.CreateAndConnectCfgDrive(
- self.adapter, self.instance, 'injected_files',
- 'network_info', 'stg_ftsk', admin_pass='admin_pass')
- tf.assert_called_once_with(name='cfg_drive', requires=['mgmt_cna'])
-
- def test_delete_vopt(self):
- # Test with no FeedTask
- task = tf_stg.DeleteVOpt(self.adapter, self.instance)
- task.execute()
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.dlt_vopt.assert_called_once_with(
- self.instance, stg_ftsk=None)
-
- self.mock_cfg_drv.reset_mock()
- self.mock_mb.reset_mock()
-
- # With a specified FeedTask
- task = tf_stg.DeleteVOpt(self.adapter, self.instance, stg_ftsk='ftsk')
- task.execute()
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.dlt_vopt.assert_called_once_with(
- self.instance, stg_ftsk='ftsk')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DeleteVOpt(self.adapter, self.instance)
- tf.assert_called_once_with(name='vopt_delete')
-
- def test_delete_disk(self):
- stor_adpt_mappings = mock.Mock()
-
- task = tf_stg.DeleteDisk(self.disk_dvr)
- task.execute(stor_adpt_mappings)
- self.disk_dvr.delete_disks.assert_called_once_with(stor_adpt_mappings)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DeleteDisk(self.disk_dvr)
- tf.assert_called_once_with(
- name='delete_disk', requires=['stor_adpt_mappings'])
-
- def test_detach_disk(self):
- task = tf_stg.DetachDisk(self.disk_dvr, self.instance)
- task.execute()
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DetachDisk(self.disk_dvr, self.instance)
- tf.assert_called_once_with(
- name='detach_disk', provides='stor_adpt_mappings')
-
- def test_attach_disk(self):
- stg_ftsk = mock.Mock()
- disk_dev_info = mock.Mock()
-
- task = tf_stg.AttachDisk(self.disk_dvr, self.instance, stg_ftsk)
- task.execute(disk_dev_info)
- self.disk_dvr.attach_disk.assert_called_once_with(
- self.instance, disk_dev_info, stg_ftsk)
-
- task.revert(disk_dev_info, 'result', 'flow failures')
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- self.disk_dvr.detach_disk.reset_mock()
-
- # Revert failures are not raised
- self.disk_dvr.detach_disk.side_effect = pvm_exc.TimeoutError(
- "timed out")
- task.revert(disk_dev_info, 'result', 'flow failures')
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.AttachDisk(self.disk_dvr, self.instance, stg_ftsk)
- tf.assert_called_once_with(
- name='attach_disk', requires=['disk_dev_info'])
-
- def test_create_disk_for_img(self):
- image_meta = mock.Mock()
-
- task = tf_stg.CreateDiskForImg(
- self.disk_dvr, self.context, self.instance, image_meta)
- task.execute()
- self.disk_dvr.create_disk_from_image.assert_called_once_with(
- self.context, self.instance, image_meta)
-
- task.revert('result', 'flow failures')
- self.disk_dvr.delete_disks.assert_called_once_with(['result'])
-
- self.disk_dvr.delete_disks.reset_mock()
-
- # Delete not called if no result
- task.revert(None, None)
- self.disk_dvr.delete_disks.assert_not_called()
-
- # Delete exception doesn't raise
- self.disk_dvr.delete_disks.side_effect = pvm_exc.TimeoutError(
- "timed out")
- task.revert('result', 'flow failures')
- self.disk_dvr.delete_disks.assert_called_once_with(['result'])
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.CreateDiskForImg(
- self.disk_dvr, self.context, self.instance, image_meta)
- tf.assert_called_once_with(
- name='create_disk_from_img', provides='disk_dev_info')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.mgmt.discover_vscsi_disk', autospec=True)
- @mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
- def test_instance_disk_to_mgmt(self, mock_rm, mock_discover, mock_find):
- mock_discover.return_value = '/dev/disk'
- mock_instance = mock.Mock()
- mock_instance.name = 'instance_name'
- mock_stg = mock.Mock()
- mock_stg.name = 'stg_name'
- mock_vwrap = mock.Mock()
- mock_vwrap.name = 'vios_name'
- mock_vwrap.uuid = 'vios_uuid'
- mock_vwrap.scsi_mappings = ['mapping1']
-
- disk_dvr = mock.MagicMock()
- disk_dvr.mp_uuid = 'mp_uuid'
- disk_dvr.connect_instance_disk_to_mgmt.return_value = (mock_stg,
- mock_vwrap)
-
- def reset_mocks():
- mock_find.reset_mock()
- mock_discover.reset_mock()
- mock_rm.reset_mock()
- disk_dvr.reset_mock()
-
- # Good path - find_maps returns one result
- mock_find.return_value = ['one_mapping']
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual('instance_disk_to_mgmt', tf.name)
- self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- mock_discover.assert_called_with('one_mapping')
- tf.revert('result', 'failures')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Good path - find_maps returns >1 result
- reset_mocks()
- mock_find.return_value = ['first_mapping', 'second_mapping']
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- mock_discover.assert_called_with('first_mapping')
- tf.revert('result', 'failures')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Management Partition is VIOS and NovaLink hosted storage
- reset_mocks()
- disk_dvr._vios_uuids = ['mp_uuid']
- dev_name = '/dev/vg/fake_name'
- disk_dvr.get_bootdisk_path.return_value = dev_name
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual((None, None, dev_name), tf.execute())
-
- # Management Partition is VIOS and not NovaLink hosted storage
- reset_mocks()
- disk_dvr._vios_uuids = ['mp_uuid']
- disk_dvr.get_bootdisk_path.return_value = None
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- tf.execute()
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
-
- # Bad path - find_maps returns no results
- reset_mocks()
- mock_find.return_value = []
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertRaises(exception.NewMgmtMappingNotFoundException,
- tf.execute)
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- # find_maps was still called
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- # discover_vscsi_disk didn't get called
- self.assertEqual(0, mock_discover.call_count)
- tf.revert('result', 'failures')
- # disconnect_disk_from_mgmt got called
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- # ...but remove_block_dev did not.
- self.assertEqual(0, mock_rm.call_count)
-
- # Bad path - connect raises
- reset_mocks()
- disk_dvr.connect_instance_disk_to_mgmt.side_effect = (
- exception.InstanceDiskMappingFailed(instance_name='inst_name'))
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertRaises(exception.InstanceDiskMappingFailed, tf.execute)
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- self.assertEqual(0, mock_find.call_count)
- self.assertEqual(0, mock_discover.call_count)
- # revert shouldn't call disconnect or remove
- tf.revert('result', 'failures')
- self.assertEqual(0, disk_dvr.disconnect_disk_from_mgmt.call_count)
- self.assertEqual(0, mock_rm.call_count)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- tf.assert_called_once_with(
- name='instance_disk_to_mgmt',
- provides=['stg_elem', 'vios_wrap', 'disk_path'])
-
- @mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
- def test_remove_instance_disk_from_mgmt(self, mock_rm):
- disk_dvr = mock.MagicMock()
- mock_instance = mock.Mock()
- mock_instance.name = 'instance_name'
- mock_stg = mock.Mock()
- mock_stg.name = 'stg_name'
- mock_vwrap = mock.Mock()
- mock_vwrap.name = 'vios_name'
- mock_vwrap.uuid = 'vios_uuid'
-
- tf = tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
- self.assertEqual('remove_inst_disk_from_mgmt', tf.name)
-
- # Boot disk not mapped to mgmt partition
- tf.execute(None, mock_vwrap, '/dev/disk')
- self.assertEqual(disk_dvr.disconnect_disk_from_mgmt.call_count, 0)
- self.assertEqual(mock_rm.call_count, 0)
-
- # Boot disk mapped to mgmt partition
- tf.execute(mock_stg, mock_vwrap, '/dev/disk')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
- tf.assert_called_once_with(
- name='remove_inst_disk_from_mgmt',
- requires=['stg_elem', 'vios_wrap', 'disk_path'])
-
- def test_attach_volume(self):
- vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
-
- task = tf_stg.AttachVolume(vol_dvr)
- task.execute()
- vol_dvr.attach_volume.assert_called_once_with()
-
- task.revert('result', 'flow failures')
- vol_dvr.reset_stg_ftsk.assert_called_once_with()
- vol_dvr.detach_volume.assert_called_once_with()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.AttachVolume(vol_dvr)
- tf.assert_called_once_with(name='attach_vol_1')
-
- def test_detach_volume(self):
- vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
-
- task = tf_stg.DetachVolume(vol_dvr)
- task.execute()
- vol_dvr.detach_volume.assert_called_once_with()
-
- task.revert('result', 'flow failures')
- vol_dvr.reset_stg_ftsk.assert_called_once_with()
- vol_dvr.detach_volume.assert_called_once_with()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DetachVolume(vol_dvr)
- tf.assert_called_once_with(name='detach_vol_1')
diff --git a/nova/tests/unit/virt/powervm/tasks/test_vm.py b/nova/tests/unit/virt/powervm/tasks/test_vm.py
deleted file mode 100644
index fc68646acf..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_vm.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from taskflow import engines as tf_eng
-from taskflow.patterns import linear_flow as tf_lf
-from taskflow import task as tf_tsk
-
-from nova import exception
-from nova import test
-from nova.virt.powervm.tasks import vm as tf_vm
-
-
-class TestVMTasks(test.NoDBTestCase):
- def setUp(self):
- super(TestVMTasks, self).setUp()
- self.apt = mock.Mock()
- self.instance = mock.Mock()
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- def test_get(self, mock_get_wrap):
- get = tf_vm.Get(self.apt, self.instance)
- get.execute()
- mock_get_wrap.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Get(self.apt, self.instance)
- tf.assert_called_once_with(name='get_vm', provides='lpar_wrap')
-
- @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.create_lpar')
- def test_create(self, mock_vm_crt, mock_stg):
- lpar_entry = mock.Mock()
-
- # Test create with normal (non-recreate) ftsk
- crt = tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'ftsk')
- mock_vm_crt.return_value = lpar_entry
- crt.execute()
-
- mock_vm_crt.assert_called_once_with(self.apt, 'host_wrapper',
- self.instance)
-
- mock_stg.assert_called_once_with(
- [lpar_entry.id], 'ftsk', lpars_exist=True)
- mock_stg.assert_called_once_with([mock_vm_crt.return_value.id], 'ftsk',
- lpars_exist=True)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'ftsk')
- tf.assert_called_once_with(name='crt_vm', provides='lpar_wrap')
-
- @mock.patch('nova.virt.powervm.vm.power_on')
- def test_power_on(self, mock_pwron):
- pwron = tf_vm.PowerOn(self.apt, self.instance)
- pwron.execute()
- mock_pwron.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.PowerOn(self.apt, self.instance)
- tf.assert_called_once_with(name='pwr_vm')
-
- @mock.patch('nova.virt.powervm.vm.power_on')
- @mock.patch('nova.virt.powervm.vm.power_off')
- def test_power_on_revert(self, mock_pwroff, mock_pwron):
- flow = tf_lf.Flow('revert_power_on')
- pwron = tf_vm.PowerOn(self.apt, self.instance)
- flow.add(pwron)
-
- # Dummy Task that fails, triggering flow revert
- def failure(*a, **k):
- raise ValueError()
- flow.add(tf_tsk.FunctorTask(failure))
-
- # When PowerOn.execute doesn't fail, revert calls power_off
- self.assertRaises(ValueError, tf_eng.run, flow)
- mock_pwron.assert_called_once_with(self.apt, self.instance)
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=True)
-
- mock_pwron.reset_mock()
- mock_pwroff.reset_mock()
-
- # When PowerOn.execute fails, revert doesn't call power_off
- mock_pwron.side_effect = exception.NovaException()
- self.assertRaises(exception.NovaException, tf_eng.run, flow)
- mock_pwron.assert_called_once_with(self.apt, self.instance)
- mock_pwroff.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vm.power_off')
- def test_power_off(self, mock_pwroff):
- # Default force_immediate
- pwroff = tf_vm.PowerOff(self.apt, self.instance)
- pwroff.execute()
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=False)
-
- mock_pwroff.reset_mock()
-
- # Explicit force_immediate
- pwroff = tf_vm.PowerOff(self.apt, self.instance, force_immediate=True)
- pwroff.execute()
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=True)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.PowerOff(self.apt, self.instance)
- tf.assert_called_once_with(name='pwr_off_vm')
-
- @mock.patch('nova.virt.powervm.vm.delete_lpar')
- def test_delete(self, mock_dlt):
- delete = tf_vm.Delete(self.apt, self.instance)
- delete.execute()
- mock_dlt.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Delete(self.apt, self.instance)
- tf.assert_called_once_with(name='dlt_vm')
diff --git a/nova/tests/unit/virt/powervm/test_driver.py b/nova/tests/unit/virt/powervm/test_driver.py
deleted file mode 100644
index 025d823d15..0000000000
--- a/nova/tests/unit/virt/powervm/test_driver.py
+++ /dev/null
@@ -1,649 +0,0 @@
-# Copyright 2016, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import fixtures
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils.fixture import uuidsentinel as uuids
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_hlp_log
-from pypowervm.helpers import vios_busy as pvm_hlp_vbusy
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import block_device as nova_block_device
-from nova.compute import provider_tree
-from nova import conf as cfg
-from nova import exception
-from nova.objects import block_device as bdmobj
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt import block_device as nova_virt_bdm
-from nova.virt import driver as nova_driver
-from nova.virt.driver import ComputeDriver
-from nova.virt import hardware
-from nova.virt.powervm.disk import ssp
-from nova.virt.powervm import driver
-
-CONF = cfg.CONF
-
-
-class TestPowerVMDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestPowerVMDriver, self).setUp()
- self.drv = driver.PowerVMDriver('virtapi')
- self.adp = self.useFixture(fixtures.MockPatch(
- 'pypowervm.adapter.Adapter', autospec=True)).mock
- self.drv.adapter = self.adp
- self.sess = self.useFixture(fixtures.MockPatch(
- 'pypowervm.adapter.Session', autospec=True)).mock
-
- self.pwron = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.power_on')).mock
- self.pwroff = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.power_off')).mock
-
- # Create an instance to test with
- self.inst = powervm.TEST_INSTANCE
-
- def test_driver_capabilities(self):
- """Test the driver capabilities."""
- # check that the driver reports all capabilities
- self.assertEqual(set(ComputeDriver.capabilities),
- set(self.drv.capabilities))
- # check the values for each capability
- self.assertFalse(self.drv.capabilities['has_imagecache'])
- self.assertFalse(self.drv.capabilities['supports_evacuate'])
- self.assertFalse(
- self.drv.capabilities['supports_migrate_to_same_host'])
- self.assertTrue(self.drv.capabilities['supports_attach_interface'])
- self.assertFalse(self.drv.capabilities['supports_device_tagging'])
- self.assertFalse(
- self.drv.capabilities['supports_tagged_attach_interface'])
- self.assertFalse(
- self.drv.capabilities['supports_tagged_attach_volume'])
- self.assertTrue(self.drv.capabilities['supports_extend_volume'])
- self.assertFalse(self.drv.capabilities['supports_multiattach'])
-
- @mock.patch('nova.image.glance.API')
- @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
- @mock.patch('oslo_utils.importutils.import_object_ns', autospec=True)
- @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
- @mock.patch('pypowervm.tasks.partition.validate_vios_ready', autospec=True)
- def test_init_host(self, mock_vvr, mock_sys, mock_import, mock_scrub,
- mock_img):
- mock_hostw = mock.Mock(uuid='uuid')
- mock_sys.get.return_value = [mock_hostw]
- self.drv.init_host('host')
- self.sess.assert_called_once_with(conn_tries=60)
- self.adp.assert_called_once_with(
- self.sess.return_value, helpers=[
- pvm_hlp_log.log_helper, pvm_hlp_vbusy.vios_busy_retry_helper])
- mock_vvr.assert_called_once_with(self.drv.adapter)
- mock_sys.get.assert_called_once_with(self.drv.adapter)
- self.assertEqual(mock_hostw, self.drv.host_wrapper)
- mock_scrub.assert_called_once_with(self.drv.adapter)
- mock_scrub.return_value.execute.assert_called_once_with()
- mock_import.assert_called_once_with(
- 'nova.virt.powervm.disk', 'localdisk.LocalStorage',
- self.drv.adapter, 'uuid')
- self.assertEqual(mock_import.return_value, self.drv.disk_dvr)
- mock_img.assert_called_once_with()
- self.assertEqual(mock_img.return_value, self.drv.image_api)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('nova.virt.powervm.vm.get_vm_qp')
- @mock.patch('nova.virt.powervm.vm._translate_vm_state')
- def test_get_info(self, mock_tx_state, mock_qp, mock_uuid):
- mock_tx_state.return_value = 'fake-state'
- self.assertEqual(hardware.InstanceInfo('fake-state'),
- self.drv.get_info('inst'))
- mock_uuid.assert_called_once_with('inst')
- mock_qp.assert_called_once_with(
- self.drv.adapter, mock_uuid.return_value, 'PartitionState')
- mock_tx_state.assert_called_once_with(mock_qp.return_value)
-
- @mock.patch('nova.virt.powervm.vm.get_lpar_names')
- def test_list_instances(self, mock_names):
- mock_names.return_value = ['one', 'two', 'three']
- self.assertEqual(['one', 'two', 'three'], self.drv.list_instances())
- mock_names.assert_called_once_with(self.adp)
-
- def test_get_available_nodes(self):
- self.flags(host='hostname')
- self.assertEqual(['hostname'], self.drv.get_available_nodes('node'))
-
- @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
- @mock.patch('nova.virt.powervm.host.build_host_resource_from_ms')
- def test_get_available_resource(self, mock_bhrfm, mock_sys):
- mock_sys.get.return_value = ['sys']
- mock_bhrfm.return_value = {'foo': 'bar'}
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
- self.assertEqual(
- {'foo': 'bar', 'local_gb': self.drv.disk_dvr.capacity,
- 'local_gb_used': self.drv.disk_dvr.capacity_used},
- self.drv.get_available_resource('node'))
- mock_sys.get.assert_called_once_with(self.adp)
- mock_bhrfm.assert_called_once_with('sys')
- self.assertEqual('sys', self.drv.host_wrapper)
-
- @contextlib.contextmanager
- def _update_provider_tree(self, allocations=None):
- """Host resource dict gets converted properly to provider tree inv."""
-
- with mock.patch('nova.virt.powervm.host.'
- 'build_host_resource_from_ms') as mock_bhrfm:
- mock_bhrfm.return_value = {
- 'vcpus': 8,
- 'memory_mb': 2048,
- }
- self.drv.host_wrapper = 'host_wrapper'
- # Validate that this gets converted to int with floor
- self.drv.disk_dvr = mock.Mock(capacity=2091.8)
- exp_inv = {
- 'VCPU': {
- 'total': 8,
- 'max_unit': 8,
- 'allocation_ratio': 16.0,
- 'reserved': 0,
- },
- 'MEMORY_MB': {
- 'total': 2048,
- 'max_unit': 2048,
- 'allocation_ratio': 1.5,
- 'reserved': 512,
- },
- 'DISK_GB': {
- 'total': 2091,
- 'max_unit': 2091,
- 'allocation_ratio': 1.0,
- 'reserved': 0,
- },
- }
- ptree = provider_tree.ProviderTree()
- ptree.new_root('compute_host', uuids.cn)
- # Let the caller muck with these
- yield ptree, exp_inv
- self.drv.update_provider_tree(ptree, 'compute_host',
- allocations=allocations)
- self.assertEqual(exp_inv, ptree.data('compute_host').inventory)
- mock_bhrfm.assert_called_once_with('host_wrapper')
-
- def test_update_provider_tree(self):
- # Basic: no inventory already on the provider, no extra providers, no
- # aggregates or traits.
- with self._update_provider_tree():
- pass
-
- def test_update_provider_tree_ignore_allocations(self):
- with self._update_provider_tree(allocations="This is ignored"):
- pass
-
- def test_update_provider_tree_conf_overrides(self):
- # Non-default CONF values for allocation ratios and reserved.
- self.flags(cpu_allocation_ratio=12.3,
- reserved_host_cpus=4,
- ram_allocation_ratio=4.5,
- reserved_host_memory_mb=32,
- disk_allocation_ratio=6.7,
- # This gets int(ceil)'d
- reserved_host_disk_mb=5432.1)
- with self._update_provider_tree() as (_, exp_inv):
- exp_inv['VCPU']['allocation_ratio'] = 12.3
- exp_inv['VCPU']['reserved'] = 4
- exp_inv['MEMORY_MB']['allocation_ratio'] = 4.5
- exp_inv['MEMORY_MB']['reserved'] = 32
- exp_inv['DISK_GB']['allocation_ratio'] = 6.7
- exp_inv['DISK_GB']['reserved'] = 6
-
- def test_update_provider_tree_complex_ptree(self):
- # Overrides inventory already on the provider; leaves other providers
- # and aggregates/traits alone.
- with self._update_provider_tree() as (ptree, exp_inv):
- ptree.update_inventory('compute_host', {
- # these should get blown away
- 'VCPU': {
- 'total': 16,
- 'max_unit': 2,
- 'allocation_ratio': 1.0,
- 'reserved': 10,
- },
- 'CUSTOM_BOGUS': {
- 'total': 1234,
- }
- })
- ptree.update_aggregates('compute_host',
- [uuids.ss_agg, uuids.other_agg])
- ptree.update_traits('compute_host', ['CUSTOM_FOO', 'CUSTOM_BAR'])
- ptree.new_root('ssp', uuids.ssp)
- ptree.update_inventory('ssp', {'sentinel': 'inventory',
- 'for': 'ssp'})
- ptree.update_aggregates('ssp', [uuids.ss_agg])
- ptree.new_child('sriov', 'compute_host', uuid=uuids.sriov)
- # Since CONF.cpu_allocation_ratio is not set and this is not
- # the initial upt call (so CONF.initial_cpu_allocation_ratio would
- # be used), the existing allocation ratio value from the tree is
- # used.
- exp_inv['VCPU']['allocation_ratio'] = 1.0
-
- # Make sure the compute's agg and traits were left alone
- cndata = ptree.data('compute_host')
- self.assertEqual(set([uuids.ss_agg, uuids.other_agg]),
- cndata.aggregates)
- self.assertEqual(set(['CUSTOM_FOO', 'CUSTOM_BAR']), cndata.traits)
- # And the other providers were left alone
- self.assertEqual(set([uuids.cn, uuids.ssp, uuids.sriov]),
- set(ptree.get_provider_uuids()))
- # ...including the ssp's aggregates
- self.assertEqual(set([uuids.ss_agg]), ptree.data('ssp').aggregates)
-
- @mock.patch('nova.virt.powervm.tasks.storage.AttachVolume.execute')
- @mock.patch('nova.virt.powervm.tasks.network.PlugMgmtVif.execute')
- @mock.patch('nova.virt.powervm.tasks.network.PlugVifs.execute')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
- @mock.patch('nova.virt.configdrive.required_by')
- @mock.patch('nova.virt.powervm.vm.create_lpar')
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
- autospec=True)
- def test_spawn_ops(self, mock_scrub, mock_bldftsk, mock_crt_lpar,
- mock_cdrb, mock_cfg_drv, mock_plug_vifs,
- mock_plug_mgmt_vif, mock_attach_vol):
- """Validates the 'typical' spawn flow of the spawn of an instance. """
- mock_cdrb.return_value = True
- self.drv.host_wrapper = mock.Mock()
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
- mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
- mock_bldftsk.return_value = mock_ftsk
- block_device_info = self._fake_bdms()
- self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
- 'allocs', network_info='netinfo',
- block_device_info=block_device_info)
- mock_crt_lpar.assert_called_once_with(
- self.adp, self.drv.host_wrapper, self.inst)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
- self.assertTrue(mock_plug_vifs.called)
- self.assertTrue(mock_plug_mgmt_vif.called)
- mock_scrub.assert_called_once_with(
- [mock_crt_lpar.return_value.id], mock_ftsk, lpars_exist=True)
- self.drv.disk_dvr.create_disk_from_image.assert_called_once_with(
- 'context', self.inst, 'img_meta')
- self.drv.disk_dvr.attach_disk.assert_called_once_with(
- self.inst, self.drv.disk_dvr.create_disk_from_image.return_value,
- mock_ftsk)
- self.assertEqual(2, mock_attach_vol.call_count)
- mock_cfg_drv.assert_called_once_with(self.adp)
- mock_cfg_drv.return_value.create_cfg_drv_vopt.assert_called_once_with(
- self.inst, 'files', 'netinfo', mock_ftsk, admin_pass='password',
- mgmt_cna=mock.ANY)
- self.pwron.assert_called_once_with(self.adp, self.inst)
-
- mock_cfg_drv.reset_mock()
- mock_attach_vol.reset_mock()
-
- # No config drive, no bdms
- mock_cdrb.return_value = False
- self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
- 'allocs')
- mock_cfg_drv.assert_not_called()
- mock_attach_vol.assert_not_called()
-
- @mock.patch('nova.virt.powervm.tasks.storage.DetachVolume.execute')
- @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs.execute')
- @mock.patch('nova.virt.powervm.vm.delete_lpar')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
- @mock.patch('nova.virt.configdrive.required_by')
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- def test_destroy(self, mock_bldftsk, mock_cdrb, mock_cfgdrv,
- mock_dlt_lpar, mock_unplug, mock_detach_vol):
- """Validates PowerVM destroy."""
- self.drv.host_wrapper = mock.Mock()
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
-
- mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
- mock_bldftsk.return_value = mock_ftsk
- block_device_info = self._fake_bdms()
-
- # Good path, with config drive, destroy disks
- mock_cdrb.return_value = True
- self.drv.destroy('context', self.inst, [],
- block_device_info=block_device_info)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag=[pvm_const.XAG.VIO_SMAP])
- mock_unplug.assert_called_once()
- mock_cdrb.assert_called_once_with(self.inst)
- mock_cfgdrv.assert_called_once_with(self.adp)
- mock_cfgdrv.return_value.dlt_vopt.assert_called_once_with(
- self.inst, stg_ftsk=mock_bldftsk.return_value)
- self.assertEqual(2, mock_detach_vol.call_count)
- self.drv.disk_dvr.detach_disk.assert_called_once_with(
- self.inst)
- self.drv.disk_dvr.delete_disks.assert_called_once_with(
- self.drv.disk_dvr.detach_disk.return_value)
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- self.pwroff.reset_mock()
- mock_bldftsk.reset_mock()
- mock_unplug.reset_mock()
- mock_cdrb.reset_mock()
- mock_cfgdrv.reset_mock()
- self.drv.disk_dvr.detach_disk.reset_mock()
- self.drv.disk_dvr.delete_disks.reset_mock()
- mock_detach_vol.reset_mock()
- mock_dlt_lpar.reset_mock()
-
- # No config drive, preserve disks, no block device info
- mock_cdrb.return_value = False
- self.drv.destroy('context', self.inst, [], block_device_info={},
- destroy_disks=False)
- mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
- mock_detach_vol.assert_not_called()
- self.drv.disk_dvr.delete_disks.assert_not_called()
-
- # Non-forced power_off, since preserving disks
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag=[pvm_const.XAG.VIO_SMAP])
- mock_unplug.assert_called_once()
- mock_cdrb.assert_called_once_with(self.inst)
- mock_cfgdrv.assert_not_called()
- mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
- self.drv.disk_dvr.detach_disk.assert_called_once_with(
- self.inst)
- self.drv.disk_dvr.delete_disks.assert_not_called()
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- self.pwroff.reset_mock()
- mock_bldftsk.reset_mock()
- mock_unplug.reset_mock()
- mock_cdrb.reset_mock()
- mock_cfgdrv.reset_mock()
- self.drv.disk_dvr.detach_disk.reset_mock()
- self.drv.disk_dvr.delete_disks.reset_mock()
- mock_dlt_lpar.reset_mock()
-
- # InstanceNotFound exception, non-forced
- self.pwroff.side_effect = exception.InstanceNotFound(
- instance_id='something')
- self.drv.destroy('context', self.inst, [], block_device_info={},
- destroy_disks=False)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False)
- self.drv.disk_dvr.detach_disk.assert_not_called()
- mock_unplug.assert_not_called()
- self.drv.disk_dvr.delete_disks.assert_not_called()
- mock_dlt_lpar.assert_not_called()
-
- self.pwroff.reset_mock()
- self.pwroff.side_effect = None
- mock_unplug.reset_mock()
-
- # Convertible (PowerVM) exception
- mock_dlt_lpar.side_effect = pvm_exc.TimeoutError("Timed out")
- self.assertRaises(exception.InstanceTerminationFailure,
- self.drv.destroy, 'context', self.inst, [],
- block_device_info={})
-
- # Everything got called
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True)
- mock_unplug.assert_called_once()
- self.drv.disk_dvr.detach_disk.assert_called_once_with(self.inst)
- self.drv.disk_dvr.delete_disks.assert_called_once_with(
- self.drv.disk_dvr.detach_disk.return_value)
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- # Other random exception raises directly
- mock_dlt_lpar.side_effect = ValueError()
- self.assertRaises(ValueError,
- self.drv.destroy, 'context', self.inst, [],
- block_device_info={})
-
- @mock.patch('nova.virt.powervm.tasks.image.UpdateTaskState.'
- 'execute', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.storage.InstanceDiskToMgmt.'
- 'execute', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.image.StreamToGlance.execute')
- @mock.patch('nova.virt.powervm.tasks.storage.RemoveInstanceDiskFromMgmt.'
- 'execute')
- def test_snapshot(self, mock_rm, mock_stream, mock_conn, mock_update):
- self.drv.disk_dvr = mock.Mock()
- self.drv.image_api = mock.Mock()
- mock_conn.return_value = 'stg_elem', 'vios_wrap', 'disk_path'
- self.drv.snapshot('context', self.inst, 'image_id',
- 'update_task_state')
- self.assertEqual(2, mock_update.call_count)
- self.assertEqual(1, mock_conn.call_count)
- mock_stream.assert_called_once_with(disk_path='disk_path')
- mock_rm.assert_called_once_with(
- stg_elem='stg_elem', vios_wrap='vios_wrap', disk_path='disk_path')
-
- self.drv.disk_dvr.capabilities = {'snapshot': False}
- self.assertRaises(exception.NotSupportedWithOption, self.drv.snapshot,
- 'context', self.inst, 'image_id', 'update_task_state')
-
- def test_power_on(self):
- self.drv.power_on('context', self.inst, 'network_info')
- self.pwron.assert_called_once_with(self.adp, self.inst)
-
- def test_power_off(self):
- self.drv.power_off(self.inst)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True, timeout=None)
-
- def test_power_off_timeout(self):
- # Long timeout (retry interval means nothing on powervm)
- self.drv.power_off(self.inst, timeout=500, retry_interval=10)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False, timeout=500)
-
- @mock.patch('nova.virt.powervm.vm.reboot')
- def test_reboot_soft(self, mock_reboot):
- inst = mock.Mock()
- self.drv.reboot('context', inst, 'network_info', 'SOFT')
- mock_reboot.assert_called_once_with(self.adp, inst, False)
-
- @mock.patch('nova.virt.powervm.vm.reboot')
- def test_reboot_hard(self, mock_reboot):
- inst = mock.Mock()
- self.drv.reboot('context', inst, 'network_info', 'HARD')
- mock_reboot.assert_called_once_with(self.adp, inst, True)
-
- @mock.patch('nova.virt.powervm.driver.PowerVMDriver.plug_vifs')
- def test_attach_interface(self, mock_plug_vifs):
- self.drv.attach_interface('context', 'inst', 'image_meta', 'vif')
- mock_plug_vifs.assert_called_once_with('inst', ['vif'])
-
- @mock.patch('nova.virt.powervm.driver.PowerVMDriver.unplug_vifs')
- def test_detach_interface(self, mock_unplug_vifs):
- self.drv.detach_interface('context', 'inst', 'vif')
- mock_unplug_vifs.assert_called_once_with('inst', ['vif'])
-
- @mock.patch('nova.virt.powervm.tasks.vm.Get', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.base.run', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.network.PlugVifs', autospec=True)
- @mock.patch('taskflow.patterns.linear_flow.Flow', autospec=True)
- def test_plug_vifs(self, mock_tf, mock_plug_vifs, mock_tf_run, mock_get):
- # Successful plug
- mock_inst = mock.Mock()
- self.drv.plug_vifs(mock_inst, 'net_info')
- mock_get.assert_called_once_with(self.adp, mock_inst)
- mock_plug_vifs.assert_called_once_with(
- self.drv.virtapi, self.adp, mock_inst, 'net_info')
- add_calls = [mock.call(mock_get.return_value),
- mock.call(mock_plug_vifs.return_value)]
- mock_tf.return_value.add.assert_has_calls(add_calls)
- mock_tf_run.assert_called_once_with(
- mock_tf.return_value, instance=mock_inst)
-
- # InstanceNotFound and generic exception both raise
- mock_tf_run.side_effect = exception.InstanceNotFound('id')
- exc = self.assertRaises(exception.VirtualInterfacePlugException,
- self.drv.plug_vifs, mock_inst, 'net_info')
- self.assertIn('instance', str(exc))
- mock_tf_run.side_effect = Exception
- exc = self.assertRaises(exception.VirtualInterfacePlugException,
- self.drv.plug_vifs, mock_inst, 'net_info')
- self.assertIn('unexpected', str(exc))
-
- @mock.patch('nova.virt.powervm.tasks.base.run', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs', autospec=True)
- @mock.patch('taskflow.patterns.linear_flow.Flow', autospec=True)
- def test_unplug_vifs(self, mock_tf, mock_unplug_vifs, mock_tf_run):
- # Successful unplug
- mock_inst = mock.Mock()
- self.drv.unplug_vifs(mock_inst, 'net_info')
- mock_unplug_vifs.assert_called_once_with(self.adp, mock_inst,
- 'net_info')
- mock_tf.return_value.add.assert_called_once_with(
- mock_unplug_vifs.return_value)
- mock_tf_run.assert_called_once_with(mock_tf.return_value,
- instance=mock_inst)
-
- # InstanceNotFound should pass
- mock_tf_run.side_effect = exception.InstanceNotFound(instance_id='1')
- self.drv.unplug_vifs(mock_inst, 'net_info')
-
- # Raise InterfaceDetachFailed otherwise
- mock_tf_run.side_effect = Exception
- self.assertRaises(exception.InterfaceDetachFailed,
- self.drv.unplug_vifs, mock_inst, 'net_info')
-
- @mock.patch('pypowervm.tasks.vterm.open_remotable_vnc_vterm',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid',
- new=mock.Mock(return_value='uuid'))
- def test_get_vnc_console(self, mock_vterm):
- # Success
- mock_vterm.return_value = '10'
- resp = self.drv.get_vnc_console(mock.ANY, self.inst)
- self.assertEqual('127.0.0.1', resp.host)
- self.assertEqual('10', resp.port)
- self.assertEqual('uuid', resp.internal_access_path)
- mock_vterm.assert_called_once_with(
- mock.ANY, 'uuid', mock.ANY, vnc_path='uuid')
-
- # VNC failure - exception is raised directly
- mock_vterm.side_effect = pvm_exc.VNCBasedTerminalFailedToOpen(err='xx')
- self.assertRaises(pvm_exc.VNCBasedTerminalFailedToOpen,
- self.drv.get_vnc_console, mock.ANY, self.inst)
-
- # 404
- mock_vterm.side_effect = pvm_exc.HttpError(mock.Mock(status=404))
- self.assertRaises(exception.InstanceNotFound, self.drv.get_vnc_console,
- mock.ANY, self.inst)
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_attach_volume(self, mock_vscsi_adpt):
- """Validates the basic PowerVM attach volume."""
- # BDMs
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
-
- with mock.patch.object(self.inst, 'save') as mock_save:
- # Invoke the method.
- self.drv.attach_volume('context', mock_bdm.get('connection_info'),
- self.inst, mock.sentinel.stg_ftsk)
-
- # Verify the connect volume was invoked
- mock_vscsi_adpt.return_value.attach_volume.assert_called_once_with()
- mock_save.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_detach_volume(self, mock_vscsi_adpt):
- """Validates the basic PowerVM detach volume."""
- # BDMs
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
-
- # Invoke the method, good path test.
- self.drv.detach_volume('context', mock_bdm.get('connection_info'),
- self.inst, mock.sentinel.stg_ftsk)
- # Verify the disconnect volume was invoked
- mock_vscsi_adpt.return_value.detach_volume.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_extend_volume(self, mock_vscsi_adpt):
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
- self.drv.extend_volume(
- 'context', mock_bdm.get('connection_info'), self.inst, 0)
- mock_vscsi_adpt.return_value.extend_volume.assert_called_once_with()
-
- def test_vol_drv_iter(self):
- block_device_info = self._fake_bdms()
- bdms = nova_driver.block_device_info_get_mapping(block_device_info)
- vol_adpt = mock.Mock()
-
- def _get_results(bdms):
- # Patch so we get the same mock back each time.
- with mock.patch('nova.virt.powervm.volume.fcvscsi.'
- 'FCVscsiVolumeAdapter', return_value=vol_adpt):
- return [
- (bdm, vol_drv) for bdm, vol_drv in self.drv._vol_drv_iter(
- 'context', self.inst, bdms)]
-
- results = _get_results(bdms)
- self.assertEqual(
- 'fake_vol1',
- results[0][0]['connection_info']['data']['volume_id'])
- self.assertEqual(vol_adpt, results[0][1])
- self.assertEqual(
- 'fake_vol2',
- results[1][0]['connection_info']['data']['volume_id'])
- self.assertEqual(vol_adpt, results[1][1])
-
- # Test with empty bdms
- self.assertEqual([], _get_results([]))
-
- @staticmethod
- def _fake_bdms():
- def _fake_bdm(volume_id, target_lun):
- connection_info = {'driver_volume_type': 'fibre_channel',
- 'data': {'volume_id': volume_id,
- 'target_lun': target_lun,
- 'initiator_target_map':
- {'21000024F5': ['50050768']}}}
- mapping_dict = {'source_type': 'volume', 'volume_id': volume_id,
- 'destination_type': 'volume',
- 'connection_info':
- jsonutils.dumps(connection_info),
- }
- bdm_dict = nova_block_device.BlockDeviceDict(mapping_dict)
- bdm_obj = bdmobj.BlockDeviceMapping(**bdm_dict)
-
- return nova_virt_bdm.DriverVolumeBlockDevice(bdm_obj)
-
- bdm_list = [_fake_bdm('fake_vol1', 0), _fake_bdm('fake_vol2', 1)]
- block_device_info = {'block_device_mapping': bdm_list}
-
- return block_device_info
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.wwpns', autospec=True)
- def test_get_volume_connector(self, mock_wwpns):
- vol_connector = self.drv.get_volume_connector(mock.Mock())
- self.assertEqual(mock_wwpns.return_value, vol_connector['wwpns'])
- self.assertFalse(vol_connector['multipath'])
- self.assertEqual(vol_connector['host'], CONF.host)
- self.assertIsNone(vol_connector['initiator'])
diff --git a/nova/tests/unit/virt/powervm/test_host.py b/nova/tests/unit/virt/powervm/test_host.py
deleted file mode 100644
index 625e1f9c70..0000000000
--- a/nova/tests/unit/virt/powervm/test_host.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2016 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import mock
-from pypowervm.wrappers import managed_system as pvm_ms
-
-from nova import test
-from nova.virt.powervm import host as pvm_host
-
-
-class TestPowerVMHost(test.NoDBTestCase):
- def test_host_resources(self):
- # Create objects to test with
- ms_wrapper = mock.create_autospec(pvm_ms.System, spec_set=True)
- asio = mock.create_autospec(pvm_ms.ASIOConfig, spec_set=True)
- ms_wrapper.configure_mock(
- proc_units_configurable=500,
- proc_units_avail=500,
- memory_configurable=5242880,
- memory_free=5242752,
- memory_region_size='big',
- asio_config=asio)
- self.flags(host='the_hostname')
-
- # Run the actual test
- stats = pvm_host.build_host_resource_from_ms(ms_wrapper)
- self.assertIsNotNone(stats)
-
- # Check for the presence of fields
- fields = (('vcpus', 500), ('vcpus_used', 0),
- ('memory_mb', 5242880), ('memory_mb_used', 128),
- 'hypervisor_type', 'hypervisor_version',
- ('hypervisor_hostname', 'the_hostname'), 'cpu_info',
- 'supported_instances', 'stats')
- for fld in fields:
- if isinstance(fld, tuple):
- value = stats.get(fld[0], None)
- self.assertEqual(value, fld[1])
- else:
- value = stats.get(fld, None)
- self.assertIsNotNone(value)
- # Check for individual stats
- hstats = (('proc_units', '500.00'), ('proc_units_used', '0.00'))
- for stat in hstats:
- if isinstance(stat, tuple):
- value = stats['stats'].get(stat[0], None)
- self.assertEqual(value, stat[1])
- else:
- value = stats['stats'].get(stat, None)
- self.assertIsNotNone(value)
diff --git a/nova/tests/unit/virt/powervm/test_image.py b/nova/tests/unit/virt/powervm/test_image.py
deleted file mode 100644
index 2db33e6a0f..0000000000
--- a/nova/tests/unit/virt/powervm/test_image.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-from nova.virt.powervm import image
-
-
-class TestImage(test.TestCase):
-
- @mock.patch('nova.utils.temporary_chown', autospec=True)
- @mock.patch('nova.image.glance.API', autospec=True)
- def test_stream_blockdev_to_glance(self, mock_api, mock_chown):
- mock_open = mock.mock_open()
- with mock.patch('builtins.open', new=mock_open):
- image.stream_blockdev_to_glance('context', mock_api, 'image_id',
- 'metadata', '/dev/disk')
- mock_chown.assert_called_with('/dev/disk')
- mock_open.assert_called_with('/dev/disk', 'rb')
- mock_api.update.assert_called_with('context', 'image_id', 'metadata',
- mock_open.return_value)
-
- @mock.patch('nova.image.glance.API', autospec=True)
- def test_generate_snapshot_metadata(self, mock_api):
- mock_api.get.return_value = {'name': 'image_name'}
- mock_instance = mock.Mock()
- mock_instance.project_id = 'project_id'
- ret = image.generate_snapshot_metadata('context', mock_api, 'image_id',
- mock_instance)
- mock_api.get.assert_called_with('context', 'image_id')
- self.assertEqual({
- 'name': 'image_name',
- 'status': 'active',
- 'disk_format': 'raw',
- 'container_format': 'bare',
- 'properties': {
- 'image_location': 'snapshot',
- 'image_state': 'available',
- 'owner_id': 'project_id',
- }
- }, ret)
diff --git a/nova/tests/unit/virt/powervm/test_media.py b/nova/tests/unit/virt/powervm/test_media.py
deleted file mode 100644
index f98769e0de..0000000000
--- a/nova/tests/unit/virt/powervm/test_media.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils.fixture import uuidsentinel
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import network as pvm_net
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import test
-from nova.virt.powervm import media as m
-
-
-class TestConfigDrivePowerVM(test.NoDBTestCase):
- """Unit Tests for the ConfigDrivePowerVM class."""
-
- def setUp(self):
- super(TestConfigDrivePowerVM, self).setUp()
-
- self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
- self.validate_vopt = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.vopt.validate_vopt_repo_exists',
- autospec=True)).mock
- self.validate_vopt.return_value = 'vios_uuid', 'vg_uuid'
-
- @mock.patch('nova.api.metadata.base.InstanceMetadata')
- @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
- def test_crt_cfg_dr_iso(self, mock_mkdrv, mock_meta):
- """Validates that the image creation method works."""
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
- self.assertTrue(self.validate_vopt.called)
- mock_instance = mock.MagicMock()
- mock_instance.uuid = uuidsentinel.inst_id
- mock_files = mock.MagicMock()
- mock_net = mock.MagicMock()
- iso_path = '/tmp/cfgdrv.iso'
- cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
- iso_path)
- self.assertEqual(mock_mkdrv.call_count, 1)
-
- # Test retry iso create
- mock_mkdrv.reset_mock()
- mock_mkdrv.side_effect = [OSError, mock_mkdrv]
- cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
- iso_path)
- self.assertEqual(mock_mkdrv.call_count, 2)
-
- @mock.patch('tempfile.NamedTemporaryFile')
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
- @mock.patch('os.path.getsize')
- @mock.patch('pypowervm.tasks.storage.upload_vopt')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM.'
- '_create_cfg_dr_iso')
- def test_create_cfg_drv_vopt(self, mock_ccdi, mock_upl, mock_getsize,
- mock_addmap, mock_bldmap, mock_vm_id,
- mock_ntf):
- cfg_dr = m.ConfigDrivePowerVM(self.apt)
- mock_instance = mock.MagicMock()
- mock_instance.uuid = uuidsentinel.inst_id
- mock_upl.return_value = 'vopt', 'f_uuid'
- fh = mock_ntf.return_value.__enter__.return_value
- fh.name = 'iso_path'
- wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
-
- def test_afs(add_func):
- # Validate the internal add_func
- vio = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_addmap.return_value, add_func(vio))
- mock_vm_id.assert_called_once_with(mock_instance)
- mock_bldmap.assert_called_once_with(
- None, vio, mock_vm_id.return_value, 'vopt')
- mock_addmap.assert_called_once_with(vio, mock_bldmap.return_value)
- wtsk.add_functor_subtask.side_effect = test_afs
-
- # calculate expected file name
- expected_file_name = 'cfg_' + mock_instance.uuid.replace('-', '')
- allowed_len = pvm_const.MaxLen.VOPT_NAME - 4 # '.iso' is 4 chars
- expected_file_name = expected_file_name[:allowed_len] + '.iso'
-
- cfg_dr.create_cfg_drv_vopt(
- mock_instance, 'files', 'netinfo', ftsk, admin_pass='pass')
-
- mock_ntf.assert_called_once_with(mode='rb')
- mock_ccdi.assert_called_once_with(mock_instance, 'files', 'netinfo',
- 'iso_path', admin_pass='pass')
- mock_getsize.assert_called_once_with('iso_path')
- mock_upl.assert_called_once_with(self.apt, 'vios_uuid', fh,
- expected_file_name,
- mock_getsize.return_value)
- wtsk.add_functor_subtask.assert_called_once()
-
- def test_sanitize_network_info(self):
- network_info = [{'type': 'lbr'}, {'type': 'pvm_sea'},
- {'type': 'ovs'}]
-
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
-
- resp = cfg_dr_builder._sanitize_network_info(network_info)
- expected_ret = [{'type': 'vif'}, {'type': 'vif'},
- {'type': 'ovs'}]
- self.assertEqual(resp, expected_ret)
-
- @mock.patch('pypowervm.wrappers.storage.VG', autospec=True)
- @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
- @mock.patch('taskflow.task.FunctorTask', autospec=True)
- def test_dlt_vopt(self, mock_functask, mock_vios, mock_find_maps, mock_gmf,
- mock_uuid, mock_rmstg, mock_vg):
- cfg_dr = m.ConfigDrivePowerVM(self.apt)
- wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
-
- # Test with no media to remove
- mock_find_maps.return_value = []
- cfg_dr.dlt_vopt('inst', ftsk)
- mock_uuid.assert_called_once_with('inst')
- mock_gmf.assert_called_once_with(pvm_stg.VOptMedia)
- wtsk.add_functor_subtask.assert_called_once_with(
- tsk_map.remove_maps, mock_uuid.return_value,
- match_func=mock_gmf.return_value)
- ftsk.get_wrapper.assert_called_once_with('vios_uuid')
- mock_find_maps.assert_called_once_with(
- ftsk.get_wrapper.return_value.scsi_mappings,
- client_lpar_id=mock_uuid.return_value,
- match_func=mock_gmf.return_value)
- mock_functask.assert_not_called()
-
- # Test with media to remove
- mock_find_maps.return_value = [mock.Mock(backing_storage=media)
- for media in ['m1', 'm2']]
-
- def test_functor_task(rm_vopt):
- # Validate internal rm_vopt function
- rm_vopt()
- mock_vg.get.assert_called_once_with(
- self.apt, uuid='vg_uuid', parent_type=pvm_vios.VIOS,
- parent_uuid='vios_uuid')
- mock_rmstg.assert_called_once_with(
- mock_vg.get.return_value, vopts=['m1', 'm2'])
- return 'functor_task'
- mock_functask.side_effect = test_functor_task
-
- cfg_dr.dlt_vopt('inst', ftsk)
- mock_functask.assert_called_once()
- ftsk.add_post_execute.assert_called_once_with('functor_task')
-
- def test_mgmt_cna_to_vif(self):
- mock_cna = mock.Mock(spec=pvm_net.CNA, mac="FAD4433ED120")
-
- # Run
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
- vif = cfg_dr_builder._mgmt_cna_to_vif(mock_cna)
-
- # Validate
- self.assertEqual(vif.get('address'), "fa:d4:43:3e:d1:20")
- self.assertEqual(vif.get('id'), 'mgmt_vif')
- self.assertIsNotNone(vif.get('network'))
- self.assertEqual(1, len(vif.get('network').get('subnets')))
- subnet = vif.get('network').get('subnets')[0]
- self.assertEqual(6, subnet.get('version'))
- self.assertEqual('fe80::/64', subnet.get('cidr'))
- ip = subnet.get('ips')[0]
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120', ip.get('address'))
-
- def test_mac_to_link_local(self):
- mac = 'fa:d4:43:3e:d1:20'
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
-
- mac = '00:00:00:00:00:00'
- self.assertEqual('fe80::0200:00ff:fe00:0000',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
-
- mac = 'ff:ff:ff:ff:ff:ff'
- self.assertEqual('fe80::fdff:ffff:feff:ffff',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
diff --git a/nova/tests/unit/virt/powervm/test_mgmt.py b/nova/tests/unit/virt/powervm/test_mgmt.py
deleted file mode 100644
index 5c0098ceeb..0000000000
--- a/nova/tests/unit/virt/powervm/test_mgmt.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import retrying
-
-from nova import exception
-from nova import test
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.tests.test_utils import pvmhttp
-
-from nova.virt.powervm import mgmt
-
-LPAR_HTTPRESP_FILE = "lpar.txt"
-
-
-class TestMgmt(test.TestCase):
- def setUp(self):
- super(TestMgmt, self).setUp()
- self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
- lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
- self.assertIsNotNone(
- lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)
-
- self.resp = lpar_http.response
-
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- def test_mgmt_uuid(self, mock_get_partition):
- mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
- adpt = mock.Mock()
-
- # First run should call the partition only once
- self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
- mock_get_partition.assert_called_once_with(adpt)
-
- # But a subsequent call should effectively no-op
- mock_get_partition.reset_mock()
- self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
- self.assertEqual(mock_get_partition.call_count, 0)
-
- @mock.patch('glob.glob', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- @mock.patch('os.path.realpath', autospec=True)
- def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
- mock_glob):
- scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
- udid = ('275b5d5f88fa5611e48be9000098be9400'
- '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
- devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
- mapping = mock.Mock()
- mapping.client_adapter.lpar_slot_num = 5
- mapping.backing_storage.udid = udid
- # Realistically, first glob would return e.g. .../host0/.../host0/...
- # but it doesn't matter for test purposes.
- mock_glob.side_effect = [[scanpath], [devlink]]
- mgmt.discover_vscsi_disk(mapping)
- mock_glob.assert_has_calls(
- [mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
- mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
- mock_realpath.assert_called_with(devlink)
-
- @mock.patch('retrying.retry', autospec=True)
- @mock.patch('glob.glob', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
- mock_glob, mock_retry):
- """Zero or more than one disk is found by discover_vscsi_disk."""
- def validate_retry(kwargs):
- self.assertIn('retry_on_result', kwargs)
- self.assertEqual(250, kwargs['wait_fixed'])
- self.assertEqual(300000, kwargs['stop_max_delay'])
-
- def raiser(unused):
- raise retrying.RetryError(mock.Mock(attempt_number=123))
-
- def retry_passthrough(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_dev):
- return _poll_for_dev
- return wrapped
-
- def retry_timeout(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_dev):
- return raiser
- return wrapped
-
- udid = ('275b5d5f88fa5611e48be9000098be9400'
- '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
- mapping = mock.Mock()
- mapping.client_adapter.lpar_slot_num = 5
- mapping.backing_storage.udid = udid
- # No disks found
- mock_retry.side_effect = retry_timeout
- mock_glob.side_effect = lambda path: []
- self.assertRaises(exception.NoDiskDiscoveryException,
- mgmt.discover_vscsi_disk, mapping)
- # Multiple disks found
- mock_retry.side_effect = retry_passthrough
- mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
- self.assertRaises(exception.UniqueDiskDiscoveryException,
- mgmt.discover_vscsi_disk, mapping)
-
- @mock.patch('time.sleep', autospec=True)
- @mock.patch('os.path.realpath', autospec=True)
- @mock.patch('os.stat', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
- mock_sleep):
- link = '/dev/link/foo'
- realpath = '/dev/sde'
- delpath = '/sys/block/sde/device/delete'
- mock_realpath.return_value = realpath
-
- # Good path
- mock_stat.side_effect = (None, None, OSError())
- mgmt.remove_block_dev(link)
- mock_realpath.assert_called_with(link)
- mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
- mock.call(realpath)])
- mock_writefile.assert_called_once_with(delpath, 'a', '1')
- self.assertEqual(0, mock_sleep.call_count)
-
- # Device param not found
- mock_writefile.reset_mock()
- mock_stat.reset_mock()
- mock_stat.side_effect = (OSError(), None, None)
- self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
- link)
- # stat was called once; exec was not called
- self.assertEqual(1, mock_stat.call_count)
- self.assertEqual(0, mock_writefile.call_count)
-
- # Delete special file not found
- mock_writefile.reset_mock()
- mock_stat.reset_mock()
- mock_stat.side_effect = (None, OSError(), None)
- self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
- link)
- # stat was called twice; exec was not called
- self.assertEqual(2, mock_stat.call_count)
- self.assertEqual(0, mock_writefile.call_count)
-
- @mock.patch('retrying.retry')
- @mock.patch('os.path.realpath')
- @mock.patch('os.stat')
- @mock.patch('nova.privsep.path.writefile')
- def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
- mock_realpath, mock_retry):
-
- def validate_retry(kwargs):
- self.assertIn('retry_on_result', kwargs)
- self.assertEqual(250, kwargs['wait_fixed'])
- self.assertEqual(10000, kwargs['stop_max_delay'])
-
- def raiser(unused):
- raise retrying.RetryError(mock.Mock(attempt_number=123))
-
- def retry_timeout(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_del):
- return raiser
- return wrapped
-
- # Deletion was attempted, but device is still there
- link = '/dev/link/foo'
- delpath = '/sys/block/sde/device/delete'
- realpath = '/dev/sde'
- mock_realpath.return_value = realpath
- mock_stat.side_effect = lambda path: 1
- mock_retry.side_effect = retry_timeout
-
- self.assertRaises(
- exception.DeviceDeletionException, mgmt.remove_block_dev, link)
- mock_realpath.assert_called_once_with(link)
- mock_dacw.assert_called_with(delpath, 'a', '1')
diff --git a/nova/tests/unit/virt/powervm/test_vif.py b/nova/tests/unit/virt/powervm/test_vif.py
deleted file mode 100644
index 985c48abe5..0000000000
--- a/nova/tests/unit/virt/powervm/test_vif.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from pypowervm import exceptions as pvm_ex
-from pypowervm.wrappers import network as pvm_net
-
-from nova import exception
-from nova.network import model
-from nova import test
-from nova.virt.powervm import vif
-
-
-def cna(mac):
- """Builds a mock Client Network Adapter for unit tests."""
- return mock.Mock(spec=pvm_net.CNA, mac=mac, vswitch_uri='fake_href')
-
-
-class TestVifFunctions(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifFunctions, self).setUp()
-
- self.adpt = mock.Mock()
-
- @mock.patch('nova.virt.powervm.vif.PvmOvsVifDriver')
- def test_build_vif_driver(self, mock_driver):
- # Valid vif type
- driver = vif._build_vif_driver(self.adpt, 'instance', {'type': 'ovs'})
- self.assertEqual(mock_driver.return_value, driver)
-
- mock_driver.reset_mock()
-
- # Fail if no vif type
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif._build_vif_driver, self.adpt, 'instance',
- {'type': None})
- mock_driver.assert_not_called()
-
- # Fail if invalid vif type
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif._build_vif_driver, self.adpt, 'instance',
- {'type': 'bad_type'})
- mock_driver.assert_not_called()
-
- @mock.patch('oslo_serialization.jsonutils.dumps')
- @mock.patch('pypowervm.wrappers.event.Event')
- def test_push_vif_event(self, mock_event, mock_dumps):
- mock_vif = mock.Mock(mac='MAC', href='HREF')
- vif._push_vif_event(self.adpt, 'action', mock_vif, mock.Mock(),
- 'pvm_sea')
- mock_dumps.assert_called_once_with(
- {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC',
- 'type': 'pvm_sea'})
- mock_event.bld.assert_called_once_with(self.adpt, 'HREF',
- mock_dumps.return_value)
- mock_event.bld.return_value.create.assert_called_once_with()
-
- mock_dumps.reset_mock()
- mock_event.bld.reset_mock()
- mock_event.bld.return_value.create.reset_mock()
-
- # Exception reraises
- mock_event.bld.return_value.create.side_effect = IndexError
- self.assertRaises(IndexError, vif._push_vif_event, self.adpt, 'action',
- mock_vif, mock.Mock(), 'pvm_sea')
- mock_dumps.assert_called_once_with(
- {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC',
- 'type': 'pvm_sea'})
- mock_event.bld.assert_called_once_with(self.adpt, 'HREF',
- mock_dumps.return_value)
- mock_event.bld.return_value.create.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.vif._push_vif_event')
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_plug(self, mock_bld_drv, mock_event):
- """Test the top-level plug method."""
- mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}
-
- # 1) With new_vif=True (default)
- vnet = vif.plug(self.adpt, 'instance', mock_vif)
-
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif,
- new_vif=True)
- self.assertEqual(mock_bld_drv.return_value.plug.return_value, vnet)
- mock_event.assert_called_once_with(self.adpt, 'plug', vnet, mock.ANY,
- 'pvm_sea')
-
- # Clean up
- mock_bld_drv.reset_mock()
- mock_bld_drv.return_value.plug.reset_mock()
- mock_event.reset_mock()
-
- # 2) Plug returns None (which it should IRL whenever new_vif=False).
- mock_bld_drv.return_value.plug.return_value = None
- vnet = vif.plug(self.adpt, 'instance', mock_vif, new_vif=False)
-
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif,
- new_vif=False)
- self.assertIsNone(vnet)
- mock_event.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_plug_raises(self, mock_vif_drv):
- """HttpError is converted to VirtualInterfacePlugException."""
- vif_drv = mock.Mock(plug=mock.Mock(side_effect=pvm_ex.HttpError(
- resp=mock.Mock())))
- mock_vif_drv.return_value = vif_drv
- mock_vif = {'address': 'vifaddr'}
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif.plug, 'adap', 'inst', mock_vif,
- new_vif='new_vif')
- mock_vif_drv.assert_called_once_with('adap', 'inst', mock_vif)
- vif_drv.plug.assert_called_once_with(mock_vif, new_vif='new_vif')
-
- @mock.patch('nova.virt.powervm.vif._push_vif_event')
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_unplug(self, mock_bld_drv, mock_event):
- """Test the top-level unplug method."""
- mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}
-
- # 1) With default cna_w_list
- mock_bld_drv.return_value.unplug.return_value = 'vnet_w'
- vif.unplug(self.adpt, 'instance', mock_vif)
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.unplug.assert_called_once_with(
- mock_vif, cna_w_list=None)
- mock_event.assert_called_once_with(self.adpt, 'unplug', 'vnet_w',
- mock.ANY, 'pvm_sea')
- # Clean up
- mock_bld_drv.reset_mock()
- mock_bld_drv.return_value.unplug.reset_mock()
- mock_event.reset_mock()
-
- # 2) With specified cna_w_list
- mock_bld_drv.return_value.unplug.return_value = None
- vif.unplug(self.adpt, 'instance', mock_vif, cna_w_list='cnalist')
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.unplug.assert_called_once_with(
- mock_vif, cna_w_list='cnalist')
- mock_event.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_unplug_raises(self, mock_vif_drv):
- """HttpError is converted to VirtualInterfacePlugException."""
- vif_drv = mock.Mock(unplug=mock.Mock(side_effect=pvm_ex.HttpError(
- resp=mock.Mock())))
- mock_vif_drv.return_value = vif_drv
- mock_vif = {'address': 'vifaddr'}
- self.assertRaises(exception.VirtualInterfaceUnplugException,
- vif.unplug, 'adap', 'inst', mock_vif,
- cna_w_list='cna_w_list')
- mock_vif_drv.assert_called_once_with('adap', 'inst', mock_vif)
- vif_drv.unplug.assert_called_once_with(
- mock_vif, cna_w_list='cna_w_list')
-
-
-class TestVifOvsDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifOvsDriver, self).setUp()
-
- self.adpt = mock.Mock()
- self.inst = mock.MagicMock(uuid='inst_uuid')
- self.drv = vif.PvmOvsVifDriver(self.adpt, self.inst)
-
- @mock.patch('pypowervm.tasks.cna.crt_p2p_cna', autospec=True)
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- def test_plug(self, mock_pvm_uuid, mock_mgmt_lpar, mock_p2p_cna,):
- # Mock the data
- mock_pvm_uuid.return_value = 'lpar_uuid'
- mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid')
- # mock_trunk_dev_name.return_value = 'device'
-
- cna_w, trunk_wraps = mock.MagicMock(), [mock.MagicMock()]
- mock_p2p_cna.return_value = cna_w, trunk_wraps
-
- # Run the plug
- network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1450}})
- mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id',
- network=network_model, devname='device')
- self.drv.plug(mock_vif)
-
- # Validate the calls
- ovs_ext_ids = ('iface-id=vif_id,iface-status=active,'
- 'attached-mac=aa:bb:cc:dd:ee:ff,vm-uuid=inst_uuid')
- mock_p2p_cna.assert_called_once_with(
- self.adpt, None, 'lpar_uuid', ['mgmt_uuid'],
- 'NovaLinkVEABridge', configured_mtu=1450, crt_vswitch=True,
- mac_addr='aa:bb:cc:dd:ee:ff', dev_name='device', ovs_bridge='br0',
- ovs_ext_ids=ovs_ext_ids)
-
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- @mock.patch('pypowervm.tasks.cna.find_trunks', autospec=True)
- def test_plug_existing_vif(self, mock_find_trunks, mock_get_cnas,
- mock_pvm_uuid, mock_mgmt_lpar):
- # Mock the data
- t1, t2 = mock.MagicMock(), mock.MagicMock()
- mock_find_trunks.return_value = [t1, t2]
-
- mock_cna = mock.Mock(mac='aa:bb:cc:dd:ee:ff')
- mock_get_cnas.return_value = [mock_cna]
-
- mock_pvm_uuid.return_value = 'lpar_uuid'
-
- mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid')
-
- self.inst = mock.MagicMock(uuid='c2e7ff9f-b9b6-46fa-8716-93bbb795b8b4')
- self.drv = vif.PvmOvsVifDriver(self.adpt, self.inst)
-
- # Run the plug
- network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1500}})
- mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id',
- network=network_model, devname='devname')
- resp = self.drv.plug(mock_vif, new_vif=False)
-
- self.assertIsNone(resp)
-
- # Validate if trunk.update got invoked for all trunks of CNA of vif
- self.assertTrue(t1.update.called)
- self.assertTrue(t2.update.called)
-
- @mock.patch('pypowervm.tasks.cna.find_trunks')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug(self, mock_get_cnas, mock_find_trunks):
- # Set up the mocks
- mock_cna = mock.Mock(mac='aa:bb:cc:dd:ee:ff')
- mock_get_cnas.return_value = [mock_cna]
-
- t1, t2 = mock.MagicMock(), mock.MagicMock()
- mock_find_trunks.return_value = [t1, t2]
-
- # Call the unplug
- mock_vif = {'address': 'aa:bb:cc:dd:ee:ff',
- 'network': {'bridge': 'br-int'}}
- self.drv.unplug(mock_vif)
-
- # The trunks and the cna should have been deleted
- self.assertTrue(t1.delete.called)
- self.assertTrue(t2.delete.called)
- self.assertTrue(mock_cna.delete.called)
-
-
-class TestVifSeaDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifSeaDriver, self).setUp()
-
- self.adpt = mock.Mock()
- self.inst = mock.Mock()
- self.drv = vif.PvmSeaVifDriver(self.adpt, self.inst)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.cna.crt_cna')
- def test_plug_from_neutron(self, mock_crt_cna, mock_pvm_uuid):
- """Tests that a VIF can be created. Mocks Neutron net"""
-
- # Set up the mocks. Look like Neutron
- fake_vif = {'details': {'vlan': 5}, 'network': {'meta': {}},
- 'address': 'aabbccddeeff'}
-
- def validate_crt(adpt, host_uuid, lpar_uuid, vlan, mac_addr=None):
- self.assertIsNone(host_uuid)
- self.assertEqual(5, vlan)
- self.assertEqual('aabbccddeeff', mac_addr)
- return pvm_net.CNA.bld(self.adpt, 5, 'host_uuid',
- mac_addr=mac_addr)
- mock_crt_cna.side_effect = validate_crt
-
- # Invoke
- resp = self.drv.plug(fake_vif)
-
- # Validate (along with validate method above)
- self.assertEqual(1, mock_crt_cna.call_count)
- self.assertIsNotNone(resp)
- self.assertIsInstance(resp, pvm_net.CNA)
-
- def test_plug_existing_vif(self):
- """Tests that a VIF need not be created."""
-
- # Set up the mocks
- fake_vif = {'network': {'meta': {'vlan': 5}},
- 'address': 'aabbccddeeff'}
-
- # Invoke
- resp = self.drv.plug(fake_vif, new_vif=False)
-
- self.assertIsNone(resp)
-
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug_vifs(self, mock_vm_get):
- """Tests that a delete of the vif can be done."""
- # Mock up the CNA response. Two should already exist, the other
- # should not.
- cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
- mock_vm_get.return_value = cnas
-
- # Run method. The AABBCCDDEE11 won't be unplugged (wasn't invoked
- # below) and the last unplug will also just no-op because its not on
- # the VM.
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:ff'})
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:22'})
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:33'})
-
- # The delete should have only been called once for each applicable vif.
- # The second CNA didn't have a matching mac so it should be skipped.
- self.assertEqual(1, cnas[0].delete.call_count)
- self.assertEqual(0, cnas[1].delete.call_count)
- self.assertEqual(1, cnas[2].delete.call_count)
diff --git a/nova/tests/unit/virt/powervm/test_vm.py b/nova/tests/unit/virt/powervm/test_vm.py
deleted file mode 100644
index ab0f9c35e8..0000000000
--- a/nova/tests/unit/virt/powervm/test_vm.py
+++ /dev/null
@@ -1,563 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_log
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import lpar_builder as lpar_bld
-from pypowervm.utils import uuid as pvm_uuid
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import logical_partition as pvm_lpar
-
-from nova.compute import power_state
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm import vm
-
-
-class TestVMBuilder(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVMBuilder, self).setUp()
-
- self.adpt = mock.MagicMock()
- self.host_w = mock.MagicMock()
- self.lpar_b = vm.VMBuilder(self.host_w, self.adpt)
-
- self.san_lpar_name = self.useFixture(fixtures.MockPatch(
- 'pypowervm.util.sanitize_partition_name_for_api',
- autospec=True)).mock
-
- self.inst = powervm.TEST_INSTANCE
-
- @mock.patch('pypowervm.utils.lpar_builder.DefaultStandardize',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.utils.lpar_builder.LPARBuilder', autospec=True)
- def test_vm_builder(self, mock_lpar_bldr, mock_uuid2pvm, mock_def_stdz):
- inst = mock.Mock()
- inst.configure_mock(
- name='lpar_name', uuid='lpar_uuid',
- flavor=mock.Mock(memory_mb='mem', vcpus='vcpus', extra_specs={}))
- vmb = vm.VMBuilder('host', 'adap')
- mock_def_stdz.assert_called_once_with('host', proc_units_factor=0.1)
- self.assertEqual(mock_lpar_bldr.return_value,
- vmb.lpar_builder(inst))
- self.san_lpar_name.assert_called_once_with('lpar_name')
- mock_uuid2pvm.assert_called_once_with(inst)
- mock_lpar_bldr.assert_called_once_with(
- 'adap', {'name': self.san_lpar_name.return_value,
- 'uuid': mock_uuid2pvm.return_value,
- 'memory': 'mem',
- 'vcpu': 'vcpus',
- 'srr_capability': True}, mock_def_stdz.return_value)
-
- # Assert non-default proc_units_factor.
- mock_def_stdz.reset_mock()
- self.flags(proc_units_factor=0.2, group='powervm')
- vmb = vm.VMBuilder('host', 'adap')
- mock_def_stdz.assert_called_once_with('host', proc_units_factor=0.2)
-
- def test_format_flavor(self):
- """Perform tests against _format_flavor."""
- # convert instance uuid to pypowervm uuid
- # LP 1561128, simplified remote restart is enabled by default
- lpar_attrs = {'memory': 2048,
- 'name': self.san_lpar_name.return_value,
- 'uuid': pvm_uuid.convert_uuid_to_pvm(
- self.inst.uuid).upper(),
- 'vcpu': 1, 'srr_capability': True}
-
- # Test dedicated procs
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true'}
- test_attrs = dict(lpar_attrs, dedicated_proc='true')
-
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test dedicated procs, min/max vcpu and sharing mode
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true',
- 'powervm:dedicated_sharing_mode':
- 'share_idle_procs_active',
- 'powervm:min_vcpu': '1',
- 'powervm:max_vcpu': '3'}
- test_attrs = dict(lpar_attrs,
- dedicated_proc='true',
- sharing_mode='sre idle procs active',
- min_vcpu='1', max_vcpu='3')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test shared proc sharing mode
- self.inst.flavor.extra_specs = {'powervm:uncapped': 'true'}
- test_attrs = dict(lpar_attrs, sharing_mode='uncapped')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test availability priority
- self.inst.flavor.extra_specs = {'powervm:availability_priority': '150'}
- test_attrs = dict(lpar_attrs, avail_priority='150')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test processor compatibility
- self.inst.flavor.extra_specs = {
- 'powervm:processor_compatibility': 'POWER8'}
- test_attrs = dict(lpar_attrs, processor_compatibility='POWER8')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test min, max proc units
- self.inst.flavor.extra_specs = {'powervm:min_proc_units': '0.5',
- 'powervm:max_proc_units': '2.0'}
- test_attrs = dict(lpar_attrs, min_proc_units='0.5',
- max_proc_units='2.0')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test min, max mem
- self.inst.flavor.extra_specs = {'powervm:min_mem': '1024',
- 'powervm:max_mem': '4096'}
- test_attrs = dict(lpar_attrs, min_mem='1024', max_mem='4096')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test remote restart set to false
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false'}
- test_attrs = dict(lpar_attrs, srr_capability=False)
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
-
- # Unhandled powervm: key is ignored
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false',
- 'powervm:something_new': 'foo'}
- test_attrs = dict(lpar_attrs, srr_capability=False)
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
-
- # If we recognize a key, but don't handle it, we raise
- with mock.patch.object(self.lpar_b, '_is_pvm_valid_key',
- return_value=True):
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false',
- 'powervm:something_new': 'foo'}
- self.assertRaises(KeyError, self.lpar_b._format_flavor, self.inst)
-
- @mock.patch('pypowervm.wrappers.shared_proc_pool.SharedProcPool.search')
- def test_spp_pool_id(self, mock_search):
- # The default pool is always zero. Validate the path.
- self.assertEqual(0, self.lpar_b._spp_pool_id('DefaultPool'))
- self.assertEqual(0, self.lpar_b._spp_pool_id(None))
-
- # Further invocations require calls to the adapter. Build a minimal
- # mocked SPP wrapper
- spp = mock.MagicMock()
- spp.id = 1
-
- # Three invocations. First has too many elems. Second has none.
- # Third is just right. :-)
- mock_search.side_effect = [[spp, spp], [], [spp]]
-
- self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id,
- 'fake_name')
- self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id,
- 'fake_name')
-
- self.assertEqual(1, self.lpar_b._spp_pool_id('fake_name'))
-
-
-class TestVM(test.NoDBTestCase):
- def setUp(self):
- super(TestVM, self).setUp()
-
- self.apt = self.useFixture(pvm_fx.AdapterFx(
- traits=pvm_fx.LocalPVMTraits)).adpt
- self.apt.helpers = [pvm_log.log_helper]
-
- self.san_lpar_name = self.useFixture(fixtures.MockPatch(
- 'pypowervm.util.sanitize_partition_name_for_api')).mock
- self.san_lpar_name.side_effect = lambda name: name
- mock_entries = [mock.Mock(), mock.Mock()]
- self.resp = mock.MagicMock()
- self.resp.feed = mock.MagicMock(entries=mock_entries)
-
- self.get_pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
-
- self.inst = powervm.TEST_INSTANCE
-
- def test_translate_vm_state(self):
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('running'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('migrating running'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('starting'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('open firmware'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('shutting down'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('suspending'))
-
- self.assertEqual(power_state.SHUTDOWN,
- vm._translate_vm_state('migrating not active'))
- self.assertEqual(power_state.SHUTDOWN,
- vm._translate_vm_state('not activated'))
-
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('unknown'))
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('hardware discovery'))
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('not available'))
-
- self.assertEqual(power_state.SUSPENDED,
- vm._translate_vm_state('resuming'))
- self.assertEqual(power_state.SUSPENDED,
- vm._translate_vm_state('suspended'))
-
- self.assertEqual(power_state.CRASHED,
- vm._translate_vm_state('error'))
-
- @mock.patch('pypowervm.wrappers.logical_partition.LPAR', autospec=True)
- def test_get_lpar_names(self, mock_lpar):
- inst1 = mock.Mock()
- inst1.configure_mock(name='inst1')
- inst2 = mock.Mock()
- inst2.configure_mock(name='inst2')
- mock_lpar.search.return_value = [inst1, inst2]
- self.assertEqual({'inst1', 'inst2'}, set(vm.get_lpar_names('adap')))
- mock_lpar.search.assert_called_once_with(
- 'adap', is_mgmt_partition=False)
-
- @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True)
- def test_dlt_lpar(self, mock_vterm):
- """Performs a delete LPAR test."""
- vm.delete_lpar(self.apt, 'inst')
- self.get_pvm_uuid.assert_called_once_with('inst')
- self.apt.delete.assert_called_once_with(
- pvm_lpar.LPAR.schema_type, root_id=self.get_pvm_uuid.return_value)
- self.assertEqual(1, mock_vterm.call_count)
-
- # Test Failure Path
- # build a mock response body with the expected HSCL msg
- resp = mock.Mock()
- resp.body = 'error msg: HSCL151B more text'
- self.apt.delete.side_effect = pvm_exc.Error(
- 'Mock Error Message', response=resp)
-
- # Reset counters
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- self.assertRaises(pvm_exc.Error, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test HttpError 404
- resp.status = 404
- self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp)
- vm.delete_lpar(self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test Other HttpError
- resp.status = 111
- self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp)
- self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test HttpError 404 closing vterm
- resp.status = 404
- mock_vterm.side_effect = pvm_exc.HttpError(resp=resp)
- vm.delete_lpar(self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(0, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test Other HttpError closing vterm
- resp.status = 111
- mock_vterm.side_effect = pvm_exc.HttpError(resp=resp)
- self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(0, self.apt.delete.call_count)
-
- @mock.patch('nova.virt.powervm.vm.VMBuilder', autospec=True)
- @mock.patch('pypowervm.utils.validation.LPARWrapperValidator',
- autospec=True)
- def test_crt_lpar(self, mock_vld, mock_vmbldr):
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true'}
- mock_bldr = mock.Mock(spec=lpar_bld.LPARBuilder)
- mock_vmbldr.return_value.lpar_builder.return_value = mock_bldr
- mock_pend_lpar = mock.create_autospec(pvm_lpar.LPAR, instance=True)
- mock_bldr.build.return_value = mock_pend_lpar
-
- vm.create_lpar(self.apt, 'host', self.inst)
- mock_vmbldr.assert_called_once_with('host', self.apt)
- mock_vmbldr.return_value.lpar_builder.assert_called_once_with(
- self.inst)
- mock_bldr.build.assert_called_once_with()
- mock_vld.assert_called_once_with(mock_pend_lpar, 'host')
- mock_vld.return_value.validate_all.assert_called_once_with()
- mock_pend_lpar.create.assert_called_once_with(parent='host')
-
- # Test to verify the LPAR Creation with invalid name specification
- mock_vmbldr.side_effect = lpar_bld.LPARBuilderException("Invalid Name")
- self.assertRaises(exception.BuildAbortException,
- vm.create_lpar, self.apt, 'host', self.inst)
-
- # HttpError
- mock_vmbldr.side_effect = pvm_exc.HttpError(mock.Mock())
- self.assertRaises(exception.PowerVMAPIFailed,
- vm.create_lpar, self.apt, 'host', self.inst)
-
- @mock.patch('pypowervm.wrappers.logical_partition.LPAR', autospec=True)
- def test_get_instance_wrapper(self, mock_lpar):
- resp = mock.Mock(status=404)
- mock_lpar.get.side_effect = pvm_exc.Error('message', response=resp)
- # vm.get_instance_wrapper(self.apt, instance, 'lpar_uuid')
- self.assertRaises(exception.InstanceNotFound, vm.get_instance_wrapper,
- self.apt, self.inst)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_on(self, mock_wrap, mock_lock, mock_power_on):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- vm.power_on(None, self.inst)
- mock_power_on.assert_called_once_with(entry, None)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- mock_power_on.reset_mock()
- mock_lock.reset_mock()
-
- stop_states = [
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING]
-
- for stop_state in stop_states:
- entry.state = stop_state
- vm.power_on(None, self.inst)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_lock.reset_mock()
- self.assertEqual(0, mock_power_on.call_count)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_on_negative(self, mock_wrp, mock_power_on):
- mock_wrp.return_value = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
-
- # Convertible (PowerVM) exception
- mock_power_on.side_effect = pvm_exc.VMPowerOnFailure(
- reason='Something bad', lpar_nm='TheLPAR')
- self.assertRaises(exception.InstancePowerOnFailure,
- vm.power_on, None, self.inst)
-
- # Non-pvm error raises directly
- mock_power_on.side_effect = ValueError()
- self.assertRaises(ValueError, vm.power_on, None, self.inst)
-
- @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True)
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_off(self, mock_wrap, mock_lock, mock_power_off, mock_pop):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- vm.power_off(None, self.inst)
- self.assertEqual(0, mock_power_off.call_count)
- self.assertEqual(0, mock_pop.stop.call_count)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- stop_states = [
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING]
- for stop_state in stop_states:
- entry.state = stop_state
- mock_power_off.reset_mock()
- mock_pop.stop.reset_mock()
- mock_lock.reset_mock()
- vm.power_off(None, self.inst)
- mock_power_off.assert_called_once_with(entry)
- self.assertEqual(0, mock_pop.stop.call_count)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_power_off.reset_mock()
- mock_lock.reset_mock()
- vm.power_off(None, self.inst, force_immediate=True, timeout=5)
- self.assertEqual(0, mock_power_off.call_count)
- mock_pop.stop.assert_called_once_with(
- entry, opts=mock.ANY, timeout=5)
- self.assertEqual('PowerOff(immediate=true, operation=shutdown)',
- str(mock_pop.stop.call_args[1]['opts']))
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_off_negative(self, mock_wrap, mock_power_off):
- """Negative tests."""
- mock_wrap.return_value = mock.Mock(state=pvm_bp.LPARState.RUNNING)
-
- # Raise the expected pypowervm exception
- mock_power_off.side_effect = pvm_exc.VMPowerOffFailure(
- reason='Something bad.', lpar_nm='TheLPAR')
- # We should get a valid Nova exception that the compute manager expects
- self.assertRaises(exception.InstancePowerOffFailure,
- vm.power_off, None, self.inst)
-
- # Non-pvm error raises directly
- mock_power_off.side_effect = ValueError()
- self.assertRaises(ValueError, vm.power_off, None, self.inst)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_reboot(self, mock_wrap, mock_lock, mock_pop, mock_pwroff,
- mock_pwron):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- # No power_off
- vm.reboot('adap', self.inst, False)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_wrap.assert_called_once_with('adap', self.inst)
- mock_pwron.assert_called_once_with(entry, None)
- self.assertEqual(0, mock_pwroff.call_count)
- self.assertEqual(0, mock_pop.stop.call_count)
-
- mock_pwron.reset_mock()
-
- # power_off (no power_on) hard
- entry.state = pvm_bp.LPARState.RUNNING
- vm.reboot('adap', self.inst, True)
- self.assertEqual(0, mock_pwron.call_count)
- self.assertEqual(0, mock_pwroff.call_count)
- mock_pop.stop.assert_called_once_with(entry, opts=mock.ANY)
- self.assertEqual(
- 'PowerOff(immediate=true, operation=shutdown, restart=true)',
- str(mock_pop.stop.call_args[1]['opts']))
-
- mock_pop.reset_mock()
-
- # power_off (no power_on) soft
- entry.state = pvm_bp.LPARState.RUNNING
- vm.reboot('adap', self.inst, False)
- self.assertEqual(0, mock_pwron.call_count)
- mock_pwroff.assert_called_once_with(entry, restart=True)
- self.assertEqual(0, mock_pop.stop.call_count)
-
- mock_pwroff.reset_mock()
-
- # PowerVM error is converted
- mock_pop.stop.side_effect = pvm_exc.TimeoutError("Timed out")
- self.assertRaises(exception.InstanceRebootFailure,
- vm.reboot, 'adap', self.inst, True)
-
- # Non-PowerVM error is raised directly
- mock_pwroff.side_effect = ValueError
- self.assertRaises(ValueError, vm.reboot, 'adap', self.inst, False)
-
- @mock.patch('oslo_serialization.jsonutils.loads')
- def test_get_vm_qp(self, mock_loads):
- self.apt.helpers = ['helper1', pvm_log.log_helper, 'helper3']
-
- # Defaults
- self.assertEqual(mock_loads.return_value,
- vm.get_vm_qp(self.apt, 'lpar_uuid'))
- self.apt.read.assert_called_once_with(
- 'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
- suffix_parm=None)
- mock_loads.assert_called_once_with(self.apt.read.return_value.body)
-
- self.apt.read.reset_mock()
- mock_loads.reset_mock()
-
- # Specific qprop, no logging errors
- self.assertEqual(mock_loads.return_value,
- vm.get_vm_qp(self.apt, 'lpar_uuid', qprop='Prop',
- log_errors=False))
- self.apt.read.assert_called_once_with(
- 'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
- suffix_parm='Prop', helpers=['helper1', 'helper3'])
-
- resp = mock.MagicMock()
- resp.status = 404
- self.apt.read.side_effect = pvm_exc.HttpError(resp)
- self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- self.apt.read.side_effect = pvm_exc.Error("message", response=None)
- self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- resp.status = 500
- self.apt.read.side_effect = pvm_exc.Error("message", response=resp)
- self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.wrappers.network.CNA.search')
- @mock.patch('pypowervm.wrappers.network.CNA.get')
- def test_get_cnas(self, mock_get, mock_search, mock_uuid):
- # No kwargs: get
- self.assertEqual(mock_get.return_value, vm.get_cnas(self.apt, 'inst'))
- mock_uuid.assert_called_once_with('inst')
- mock_get.assert_called_once_with(self.apt, parent_type=pvm_lpar.LPAR,
- parent_uuid=mock_uuid.return_value)
- mock_search.assert_not_called()
- # With kwargs: search
- mock_get.reset_mock()
- mock_uuid.reset_mock()
- self.assertEqual(mock_search.return_value, vm.get_cnas(
- self.apt, 'inst', one=2, three=4))
- mock_uuid.assert_called_once_with('inst')
- mock_search.assert_called_once_with(
- self.apt, parent_type=pvm_lpar.LPAR,
- parent_uuid=mock_uuid.return_value, one=2, three=4)
- mock_get.assert_not_called()
-
- def test_norm_mac(self):
- EXPECTED = "12:34:56:78:90:ab"
- self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:ab"))
- self.assertEqual(EXPECTED, vm.norm_mac("1234567890ab"))
- self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:AB"))
- self.assertEqual(EXPECTED, vm.norm_mac("1234567890AB"))
diff --git a/nova/tests/unit/virt/powervm/volume/__init__.py b/nova/tests/unit/virt/powervm/volume/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/volume/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py b/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py
deleted file mode 100644
index 2db5b1a663..0000000000
--- a/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from pypowervm import const as pvm_const
-from pypowervm.tasks import hdisk
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stor
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import conf as cfg
-from nova import exception as exc
-from nova import test
-from nova.virt.powervm.volume import fcvscsi
-
-CONF = cfg.CONF
-
-I_WWPN_1 = '21000024FF649104'
-I_WWPN_2 = '21000024FF649105'
-
-
-class TestVSCSIAdapter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVSCSIAdapter, self).setUp()
-
- self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
- self.wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- self.ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.ftsk.configure_mock(wrapper_tasks={'vios_uuid': self.wtsk})
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- def init_vol_adpt(mock_pvm_uuid):
- con_info = {
- 'serial': 'id',
- 'data': {
- 'initiator_target_map': {
- I_WWPN_1: ['t1'],
- I_WWPN_2: ['t2', 't3']
- },
- 'target_lun': '1',
- 'volume_id': 'a_volume_identifier',
- },
- }
- mock_inst = mock.MagicMock()
- mock_pvm_uuid.return_value = '1234'
-
- return fcvscsi.FCVscsiVolumeAdapter(
- self.adpt, mock_inst, con_info, stg_ftsk=self.ftsk)
- self.vol_drv = init_vol_adpt()
-
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
- def test_reset_stg_ftsk(self, mock_vios, mock_ftsk):
- self.vol_drv.reset_stg_ftsk('stg_ftsk')
- self.assertEqual('stg_ftsk', self.vol_drv.stg_ftsk)
-
- mock_vios.getter.return_value = 'getter'
- mock_ftsk.return_value = 'local_feed_task'
- self.vol_drv.reset_stg_ftsk()
- self.assertEqual('local_feed_task', self.vol_drv.stg_ftsk)
- mock_vios.getter.assert_called_once_with(
- self.adpt, xag=[pvm_const.XAG.VIO_SMAP])
- mock_ftsk.assert_called_once_with('local_feed_task', 'getter')
-
- @mock.patch('pypowervm.tasks.partition.get_physical_wwpns', autospec=True)
- def test_wwpns(self, mock_vio_wwpns):
- mock_vio_wwpns.return_value = ['aa', 'bb']
- wwpns = fcvscsi.wwpns(self.adpt)
- self.assertListEqual(['aa', 'bb'], wwpns)
- mock_vio_wwpns.assert_called_once_with(self.adpt, force_refresh=False)
-
- def test_set_udid(self):
- # Mock connection info
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = None
-
- # Set the UDID
- self.vol_drv._set_udid('udid')
-
- # Verify
- self.assertEqual('udid',
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY])
-
- def test_get_udid(self):
- # Set the value to retrieve
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = 'udid'
- retrieved_udid = self.vol_drv._get_udid()
- # Check key found
- self.assertEqual('udid', retrieved_udid)
-
- # Check key not found
- self.vol_drv.connection_info['data'].pop(fcvscsi.UDID_KEY)
- retrieved_udid = self.vol_drv._get_udid()
- # Check key not found
- self.assertIsNone(retrieved_udid)
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- def test_attach_volume(self, mock_feed_task, mock_get_wrap):
- mock_lpar_wrap = mock.MagicMock()
- mock_lpar_wrap.can_modify_io.return_value = True, None
- mock_get_wrap.return_value = mock_lpar_wrap
- mock_attach_ftsk = mock_feed_task.return_value
-
- # Pass if all vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': True}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.vol_drv.attach_volume()
- mock_feed_task.assert_called_once()
- mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
- mock_attach_ftsk.execute.assert_called_once()
- self.ftsk.execute.assert_called_once()
-
- mock_feed_task.reset_mock()
- mock_attach_ftsk.reset_mock()
- self.ftsk.reset_mock()
-
- # Pass if 1 vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': False}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.vol_drv.attach_volume()
- mock_feed_task.assert_called_once()
- mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
- mock_attach_ftsk.execute.assert_called_once()
- self.ftsk.execute.assert_called_once()
-
- # Raise if no vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
- 'vios2': {'vio_modified': False}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)
-
- # Raise if vm in invalid state
- mock_lpar_wrap.can_modify_io.return_value = False, None
- self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_set_udid')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_append_mapping')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_discover_volume_on_vios')
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- def test_attach_volume_to_vio(self, mock_good_disc, mock_disc_vol,
- mock_add_map, mock_set_udid):
- # Setup mocks
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_disc_vol.return_value = 'status', 'devname', 'udid'
-
- # Bad discovery
- mock_good_disc.return_value = False
- ret = self.vol_drv._attach_volume_to_vio(mock_vios)
- self.assertFalse(ret)
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
-
- # Good discovery
- mock_good_disc.return_value = True
- ret = self.vol_drv._attach_volume_to_vio(mock_vios)
- self.assertTrue(ret)
- mock_add_map.assert_called_once_with(
- 'uuid', 'devname', tag='a_volume_identifier')
- mock_set_udid.assert_called_once_with('udid')
-
- def test_extend_volume(self):
- # Ensure the method is implemented
- self.vol_drv.extend_volume()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG')
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- @mock.patch('pypowervm.tasks.hdisk.discover_hdisk', autospec=True)
- @mock.patch('pypowervm.tasks.hdisk.build_itls', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_get_hdisk_itls')
- def test_discover_volume_on_vios(self, mock_get_itls, mock_build_itls,
- mock_disc_hdisk, mock_good_disc,
- mock_log):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_get_itls.return_value = 'v_wwpns', 't_wwpns', 'lun'
- mock_build_itls.return_value = 'itls'
- mock_disc_hdisk.return_value = 'status', 'devname', 'udid'
-
- # Good discovery
- mock_good_disc.return_value = True
- status, devname, udid = self.vol_drv._discover_volume_on_vios(
- mock_vios)
- self.assertEqual(mock_disc_hdisk.return_value[0], status)
- self.assertEqual(mock_disc_hdisk.return_value[1], devname)
- self.assertEqual(mock_disc_hdisk.return_value[2], udid)
- mock_get_itls.assert_called_once_with(mock_vios)
- mock_build_itls.assert_called_once_with('v_wwpns', 't_wwpns', 'lun')
- mock_disc_hdisk.assert_called_once_with(self.adpt, 'uuid', 'itls')
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_log.info.assert_called_once()
- mock_log.warning.assert_not_called()
-
- mock_log.reset_mock()
-
- # Bad discovery, not device in use status
- mock_good_disc.return_value = False
- self.vol_drv._discover_volume_on_vios(mock_vios)
- mock_log.warning.assert_not_called()
- mock_log.info.assert_not_called()
-
- # Bad discovery, device in use status
- mock_disc_hdisk.return_value = (hdisk.LUAStatus.DEVICE_IN_USE, 'dev',
- 'udid')
- self.vol_drv._discover_volume_on_vios(mock_vios)
- mock_log.warning.assert_called_once()
-
- def test_get_hdisk_itls(self):
- """Validates the _get_hdisk_itls method."""
-
- mock_vios = mock.MagicMock()
- mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_1]
-
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([I_WWPN_1], i_wwpn)
- self.assertListEqual(['t1'], t_wwpns)
- self.assertEqual('1', lun)
-
- mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_2]
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([I_WWPN_2], i_wwpn)
- self.assertListEqual(['t2', 't3'], t_wwpns)
-
- mock_vios.get_active_pfc_wwpns.return_value = ['12345']
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([], i_wwpn)
-
- @mock.patch('pypowervm.wrappers.storage.PV', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- def test_add_append_mapping(self, mock_add_map, mock_bld_map, mock_pv):
- def test_afs(add_func):
- mock_vios = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_add_map.return_value, add_func(mock_vios))
- mock_pv.bld.assert_called_once_with(self.adpt, 'devname', tag=None)
- mock_bld_map.assert_called_once_with(
- None, mock_vios, self.vol_drv.vm_uuid,
- mock_pv.bld.return_value)
- mock_add_map.assert_called_once_with(
- mock_vios, mock_bld_map.return_value)
-
- self.wtsk.add_functor_subtask.side_effect = test_afs
- self.vol_drv._add_append_mapping('vios_uuid', 'devname')
- self.wtsk.add_functor_subtask.assert_called_once()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG.warning')
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- def test_detach_volume(self, mock_feed_task, mock_get_wrap, mock_log):
- mock_lpar_wrap = mock.MagicMock()
- mock_lpar_wrap.can_modify_io.return_value = True, None
- mock_get_wrap.return_value = mock_lpar_wrap
- mock_detach_ftsk = mock_feed_task.return_value
-
- # Multiple vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': True}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_feed_task.assert_called_once()
- mock_detach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._detach_vol_for_vio, provides='vio_modified',
- flag_update=False)
- mock_detach_ftsk.execute.assert_called_once_with()
- self.ftsk.execute.assert_called_once_with()
- mock_log.assert_not_called()
-
- # 1 vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': False}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_log.assert_not_called()
-
- # No vioses modifed
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
- 'vios2': {'vio_modified': False}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_log.assert_called_once()
-
- # Raise if exception during execute
- mock_detach_ftsk.execute.side_effect = Exception()
- self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)
-
- # Raise if vm in invalid state
- mock_lpar_wrap.can_modify_io.return_value = False, None
- self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)
-
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_discover_volume_on_vios')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_remove_mapping')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_remove_hdisk')
- @mock.patch('nova.virt.powervm.vm.get_vm_qp')
- def test_detach_vol_for_vio(self, mock_get_qp, mock_rm_hdisk, mock_rm_map,
- mock_disc_vol, mock_good_disc):
- # Good detach, bdm data is found
- self.vol_drv._set_udid('udid')
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'vios_uuid'
- mock_vios.hdisk_from_uuid.return_value = 'devname'
- mock_get_qp.return_value = 'part_id'
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_vios.reset_mock()
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Good detach, no udid
- self.vol_drv._set_udid(None)
- mock_disc_vol.return_value = 'status', 'devname', 'udid'
- mock_good_disc.return_value = True
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_not_called()
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_vios.reset_mock()
- mock_disc_vol.reset_mock()
- mock_good_disc.reset_mock()
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Good detach, no device name
- self.vol_drv._set_udid('udid')
- mock_vios.hdisk_from_uuid.return_value = None
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Bad detach, invalid state
- mock_good_disc.return_value = False
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertFalse(ret)
- mock_rm_map.assert_not_called()
- mock_rm_hdisk.assert_not_called()
-
- # Bad detach, exception discovering volume on vios
- mock_disc_vol.side_effect = Exception()
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertFalse(ret)
- mock_rm_map.assert_not_called()
- mock_rm_hdisk.assert_not_called()
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- def test_add_remove_mapping(self, mock_rm_maps, mock_gen_match):
- def test_afs(rm_func):
- mock_vios = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_rm_maps.return_value, rm_func(mock_vios))
- mock_gen_match.assert_called_once_with(
- pvm_stor.PV, names=['devname'])
- mock_rm_maps.assert_called_once_with(
- mock_vios, 'vm_uuid', mock_gen_match.return_value)
-
- self.wtsk.add_functor_subtask.side_effect = test_afs
- self.vol_drv._add_remove_mapping('vm_uuid', 'vios_uuid', 'devname')
- self.wtsk.add_functor_subtask.assert_called_once()
-
- @mock.patch('pypowervm.tasks.hdisk.remove_hdisk', autospec=True)
- @mock.patch('taskflow.task.FunctorTask', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_check_host_mappings')
- def test_add_remove_hdisk(self, mock_check_maps, mock_functask,
- mock_rm_hdisk):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_check_maps.return_value = True
- self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
- mock_functask.assert_not_called()
- self.ftsk.add_post_execute.assert_not_called()
- mock_check_maps.assert_called_once_with(mock_vios, 'devname')
- self.assertEqual(0, mock_rm_hdisk.call_count)
-
- def test_functor_task(rm_hdisk, name=None):
- rm_hdisk()
- return 'functor_task'
-
- mock_check_maps.return_value = False
- mock_functask.side_effect = test_functor_task
- self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
- mock_functask.assert_called_once()
- self.ftsk.add_post_execute.assert_called_once_with('functor_task')
- mock_rm_hdisk.assert_called_once_with(self.adpt, CONF.host,
- 'devname', 'uuid')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- def test_check_host_mappings(self, mock_find_maps, mock_gen_match):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid2'
- mock_v1 = mock.MagicMock(scsi_mappings='scsi_maps_1', uuid='uuid1')
- mock_v2 = mock.MagicMock(scsi_mappings='scsi_maps_2', uuid='uuid2')
- mock_feed = [mock_v1, mock_v2]
- self.ftsk.feed = mock_feed
-
- # Multiple mappings found
- mock_find_maps.return_value = ['map1', 'map2']
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertTrue(ret)
- mock_gen_match.assert_called_once_with(pvm_stor.PV, names=['devname'])
- mock_find_maps.assert_called_once_with('scsi_maps_2', None,
- mock_gen_match.return_value)
-
- # One mapping found
- mock_find_maps.return_value = ['map1']
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertFalse(ret)
-
- # No mappings found
- mock_find_maps.return_value = []
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertFalse(ret)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index b5fca28463..703f15967c 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import encryptors
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -48,7 +49,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
- 'volblank': driver_block_device.DriverVolBlankBlockDevice
+ 'volblank': driver_block_device.DriverVolBlankBlockDevice,
+ 'image': driver_block_device.DriverImageBlockDevice,
}
swap_bdm_dict = block_device.BlockDeviceDict(
@@ -77,14 +79,22 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
- 'boot_index': -1})
+ 'boot_index': -1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
- 'disk_bus': 'scsi'}
+ 'disk_bus': 'scsi',
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
@@ -209,6 +219,35 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'boot_index': -1,
'volume_type': None}
+ image_bdm_dict = block_device.BlockDeviceDict(
+ {'id': 7, 'instance_uuid': uuids.instance,
+ 'device_name': '/dev/vda',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None})
+
+ image_driver_bdm = {
+ 'device_name': '/dev/vda',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'disk_bus': 'virtio',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None}
+
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = mock.MagicMock(autospec=cinder.API)
@@ -218,6 +257,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
+ self.image_bdm = fake_block_device.fake_bdm_object(
+ self.context, self.image_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
@@ -336,6 +377,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
if field == 'attachment_id':
# Must set UUID values on UUID fields.
fake_value = ATTACHMENT_ID
+ elif isinstance(test_bdm._bdm_obj.fields[fld],
+ fields.UUIDField):
+ # Generically handle other UUID fields.
+ fake_value = uuids.fake_value
else:
fake_value = 'fake_changed_value'
test_bdm[field] = fake_value
@@ -376,6 +421,20 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
+ def test_driver_image_block_device(self):
+ self._test_driver_device("image")
+
+ def test_driver_image_default_size(self):
+ self._test_driver_default_size('image')
+
+ def test_driver_image_block_device_destination_not_local(self):
+ self._test_driver_device('image')
+ bdm = self.image_bdm_dict.copy()
+ bdm['destination_type'] = 'volume'
+ self.assertRaises(driver_block_device._InvalidType,
+ self.driver_classes['image'],
+ fake_block_device.fake_bdm_object(self.context, bdm))
+
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
@@ -405,7 +464,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 3)
self.assertEqual('fake-snapshot-id-1', test_bdm.get('snapshot_id'))
- def test_driver_image_block_device(self):
+ def test_driver_volume_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
@@ -415,7 +474,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 1)
self.assertEqual('fake-image-id-1', test_bdm.get('image_id'))
- def test_driver_image_block_device_destination_local(self):
+ def test_driver_volume_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
@@ -436,24 +495,23 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
- with mock.patch.object(self.volume_api, 'delete') as vol_delete:
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
-
- if delete_on_termination and delete_fail:
- vol_delete.side_effect = Exception()
-
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm._call_wait_func,
- context=self.context,
- wait_func=wait_func,
- volume_api=self.volume_api,
- volume_id='fake-id')
- self.assertEqual(delete_on_termination, vol_delete.called)
+ if delete_on_termination and delete_fail:
+ self.volume_api.delete.side_effect = Exception()
+
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm._call_wait_func,
+ context=self.context,
+ wait_func=wait_func,
+ volume_api=self.volume_api,
+ volume_id='fake-id')
+ self.assertEqual(delete_on_termination, self.volume_api.delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
@@ -486,25 +544,24 @@ class TestDriverBlockDevice(test.NoDBTestCase):
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
+ if delete_attachment_raises:
+ self.volume_api.attachment_delete.side_effect = (
+ delete_attachment_raises)
+
+ self.virt_driver.get_volume_connector.return_value = connector
+
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
- mock.patch.object(self.virt_driver, 'get_volume_connector',
- return_value=connector),
mock.patch('os_brick.initiator.utils.guard_connection'),
- mock.patch.object(self.volume_api, 'attachment_delete'),
- ) as (mock_get_volume, mock_get_connector, mock_guard,
- vapi_attach_del):
-
- if delete_attachment_raises:
- vapi_attach_del.side_effect = delete_attachment_raises
+ ) as (mock_get_volume, mock_guard):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
mock_guard.assert_called_once_with(volume)
- vapi_attach_del.assert_called_once_with(elevated_context,
- attachment_id)
+ self.volume_api.attachment_delete.assert_called_once_with(
+ elevated_context, attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
@@ -955,31 +1012,28 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_get_snap, vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_get_snap.assert_called_once_with(
- self.context, 'fake-snapshot-id-1')
- vol_create.assert_called_once_with(
- self.context, 3, '', '', availability_zone=None,
- snapshot=snapshot, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.get_snapshot.assert_called_once_with(
+ self.context, 'fake-snapshot-id-1')
+ self.volume_api.create.assert_called_once_with(
+ self.context, 3, '', '', availability_zone=None,
+ snapshot=snapshot, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
@@ -987,19 +1041,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attach, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
# Make sure theses are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_snapshot_attach_no_volume_and_no_volume_type(self):
bdm = self.driver_classes['volsnapshot'](self.volsnapshot_bdm)
@@ -1009,15 +1061,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
original_volume = {'id': uuids.original_volume_id,
'volume_type_id': 'original_volume_type'}
new_volume = {'id': uuids.new_volume_id}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'get',
- return_value=original_volume),
- mock.patch.object(self.volume_api, 'create',
- return_value=new_volume),
- ) as (mock_attach, mock_get_snapshot, mock_get, mock_create):
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.get.return_value = original_volume
+ self.volume_api.create.return_value = new_volume
+ with mock.patch.object(self.driver_classes["volume"], "attach"):
bdm.volume_id = None
bdm.volume_type = None
bdm.attach(self.context, instance, self.volume_api,
@@ -1025,10 +1072,11 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# Assert that the original volume type is fetched, stored within
# the bdm and then used to create the new snapshot based volume.
- mock_get.assert_called_once_with(self.context,
- uuids.original_volume_id)
+ self.volume_api.get.assert_called_once_with(
+ self.context, uuids.original_volume_id)
self.assertEqual('original_volume_type', bdm.volume_type)
- mock_create.assert_called_once_with(self.context, bdm.volume_size,
+ self.volume_api.create.assert_called_once_with(
+ self.context, bdm.volume_size,
'', '', volume_type='original_volume_type', snapshot=snapshot,
availability_zone=None)
@@ -1100,27 +1148,25 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, 1, '', '', image_id=image['id'],
- availability_zone=None, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.create.assert_called_once_with(
+ self.context, 1, '', '', image_id=image['id'],
+ availability_zone=None, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
@@ -1128,19 +1174,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attch, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
- mock_attch.assert_called_once_with(
+ mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
# Make sure theses are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1152,30 +1196,26 @@ class TestDriverBlockDevice(test.NoDBTestCase):
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, test_bdm.volume_size,
- '%s-blank-vol' % uuids.uuid,
- '', volume_type=None, availability_zone=None)
- vol_delete.assert_called_once_with(
- self.context, volume['id'])
+ self.volume_api.create.assert_called_once_with(
+ self.context, test_bdm.volume_size,
+ '%s-blank-vol' % uuids.uuid,
+ '', volume_type=None, availability_zone=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1281,12 +1321,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
- self.ephemeral_bdm, self.volsnapshot_bdm):
+ self.ephemeral_bdm, self.volsnapshot_bdm, self.image_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
- local_image = self.volimage_bdm_dict.copy()
- local_image['destination_type'] = 'local'
- self.assertFalse(driver_block_device.is_implemented(
- fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
@@ -1484,13 +1520,9 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'display_name': 'fake-snapshot-vol'}
self.stub_volume_create(volume)
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(volume_class, 'attach')
- ) as (
- vol_get_snap, vol_attach
- ):
+ self.volume_api.get_snapshot.return_value = snapshot
+
+ with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 1e98dfba6a..26ec198f08 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -14,9 +14,9 @@
import collections
import copy
-import ddt
+from unittest import mock
-import mock
+import ddt
import testtools
import nova.conf
@@ -2814,6 +2814,54 @@ class NumberOfSerialPortsTest(test.NoDBTestCase):
flavor, image_meta)
+class VirtLockMemoryTestCase(test.NoDBTestCase):
+ def _test_get_locked_memory_constraint(self, spec=None, props=None):
+ flavor = objects.Flavor(vcpus=16, memory_mb=2048,
+ extra_specs=spec or {})
+ image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
+ return hw.get_locked_memory_constraint(flavor, image_meta)
+
+ def test_get_locked_memory_constraint_image(self):
+ self.assertTrue(
+ self._test_get_locked_memory_constraint(
+ spec={"hw:mem_page_size": "small"},
+ props={"hw_locked_memory": "True"}))
+
+ def test_get_locked_memory_conflict(self):
+ ex = self.assertRaises(
+ exception.FlavorImageLockedMemoryConflict,
+ self._test_get_locked_memory_constraint,
+ spec={
+ "hw:locked_memory": "False",
+ "hw:mem_page_size": "small"
+ },
+ props={"hw_locked_memory": "True"}
+ )
+ ex_msg = ("locked_memory value in image (True) and flavor (False) "
+ "conflict. A consistent value is expected if both "
+ "specified.")
+ self.assertEqual(ex_msg, str(ex))
+
+ def test_get_locked_memory_constraint_forbidden(self):
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {"hw:locked_memory": "True"})
+
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {},
+ {"hw_locked_memory": "True"})
+
+ def test_get_locked_memory_constraint_image_false(self):
+ # False value of locked_memory will not raise LockMemoryForbidden
+ self.assertFalse(
+ self._test_get_locked_memory_constraint(
+ spec=None,
+ props={"hw_locked_memory": "False"}))
+
+
class VirtMemoryPagesTestCase(test.NoDBTestCase):
def test_cell_instance_pagesize(self):
cell = objects.InstanceNUMACell(
@@ -3836,9 +3884,16 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
@@ -3867,9 +3922,16 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
@@ -3898,9 +3960,16 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
self.assertIsNone(inst_topo)
@@ -3927,12 +3996,24 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
@@ -3970,12 +4051,24 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
for cell in inst_topo.cells:
@@ -4003,12 +4096,24 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
self.assertIsNone(inst_topo)
diff --git a/nova/tests/unit/virt/test_imagecache.py b/nova/tests/unit/virt/test_imagecache.py
index b9dff7f6b6..b97e520074 100644
--- a/nova/tests/unit/virt/test_imagecache.py
+++ b/nova/tests/unit/virt/test_imagecache.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 085b169db3..58581d93ba 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_concurrency import processutils
from nova.compute import utils as compute_utils
diff --git a/nova/tests/unit/virt/test_netutils.py b/nova/tests/unit/virt/test_netutils.py
index de3f451351..fa0e16df19 100644
--- a/nova/tests/unit/virt/test_netutils.py
+++ b/nova/tests/unit/virt/test_netutils.py
@@ -17,6 +17,17 @@ from nova.virt import netutils
class TestNetUtilsTestCase(test.NoDBTestCase):
+
+ def _get_fake_instance_nw_info(self, num_networks, dhcp_server, mtu):
+ network_info = fake_network.fake_get_instance_nw_info(self,
+ num_networks)
+ for vif in network_info:
+ for subnet in vif['network']['subnets']:
+ subnet['meta']['dhcp_server'] = dhcp_server
+ vif['network']['meta']['mtu'] = mtu
+
+ return network_info
+
def test_get_cached_vifs_with_vlan_no_nw_info(self):
# Make sure that an empty dictionary will be returned when
# nw_info is None
@@ -39,3 +50,15 @@ class TestNetUtilsTestCase(test.NoDBTestCase):
expected = {'fa:16:3e:d1:28:e4': '2145'}
self.assertEqual(expected,
netutils.get_cached_vifs_with_vlan(network_info))
+
+ def test__get_link_mtu(self):
+ network_info_dhcp = self._get_fake_instance_nw_info(
+ 1, '192.168.0.100', 9000)
+ network_info_no_dhcp = self._get_fake_instance_nw_info(
+ 1, None, 9000)
+
+ for vif in network_info_dhcp:
+ self.assertIsNone(netutils._get_link_mtu(vif))
+
+ for vif in network_info_no_dhcp:
+ self.assertEqual(9000, netutils._get_link_mtu(vif))
diff --git a/nova/tests/unit/virt/test_osinfo.py b/nova/tests/unit/virt/test_osinfo.py
index af3698b541..5d927deab1 100644
--- a/nova/tests/unit/virt/test_osinfo.py
+++ b/nova/tests/unit/virt/test_osinfo.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import exception
from nova import objects
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 11f306c761..2d108c6f2d 100644
--- a/nova/tests/unit/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
@@ -14,8 +14,8 @@
# under the License.
import io
+from unittest import mock
-import mock
import os_traits
from nova import test
@@ -102,6 +102,33 @@ class TestVirtDriver(test.NoDBTestCase):
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_RAW])
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_VHD])
+ def test_block_device_info_get_encrypted_disks(self):
+ block_device_info = {
+ 'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
+ 'image': [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ ],
+ 'ephemerals': [
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ {'device_name': '/dev/vdc', 'encrypted': False},
+ ],
+ }
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ ]
+ self.assertEqual(expected, disks)
+ # Try removing 'image'
+ block_device_info.pop('image')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [{'device_name': '/dev/vdb', 'encrypted': True}]
+ self.assertEqual(expected, disks)
+ # Remove 'ephemerals'
+ block_device_info.pop('ephemerals')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ self.assertEqual([], disks)
+
class FakeMount(object):
def __init__(self, image, mount_dir, partition=None, device=None):
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 32d5ef2911..58fa3d4c27 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -15,9 +15,9 @@
from collections import deque
import sys
import traceback
+from unittest import mock
import fixtures
-import mock
import netaddr
import os_resource_classes as orc
import os_vif
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
index de07444ddb..7e8b1c1b63 100644
--- a/nova/tests/unit/virt/vmwareapi/test_configdrive.py
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel
from nova import context
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
index 7b31b24167..ac473c8c09 100644
--- a/nova/tests/unit/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -21,9 +21,9 @@ Test suite for VMwareAPI.
import collections
import datetime
+from unittest import mock
from eventlet import greenthread
-import mock
import os_resource_classes as orc
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
@@ -2123,7 +2123,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 16,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
},
orc.MEMORY_MB: {
'total': 2048,
@@ -2131,7 +2131,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
},
orc.DISK_GB: {
'total': 95,
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
index 3b909642fb..1716027afb 100644
--- a/nova/tests/unit/virt/vmwareapi/test_ds_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -14,8 +14,8 @@
from contextlib import contextmanager
import re
+from unittest import mock
-import mock
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
diff --git a/nova/tests/unit/virt/vmwareapi/test_imagecache.py b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
index 485b1ea4cd..1116804d2f 100644
--- a/nova/tests/unit/virt/vmwareapi/test_imagecache.py
+++ b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
index 7cfec00c97..20abc063a0 100644
--- a/nova/tests/unit/virt/vmwareapi/test_images.py
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -17,8 +17,8 @@ Test suite for images.
import os
import tarfile
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from oslo_vmware import rw_handles
@@ -117,13 +117,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -172,7 +170,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_write_handle)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
@mock.patch('oslo_vmware.rw_handles.ImageReadHandle')
@@ -188,13 +186,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -220,7 +216,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_image_transfer.assert_called_once_with(mock_read_handle,
mock_write_handle)
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
diff --git a/nova/tests/unit/virt/vmwareapi/test_network_util.py b/nova/tests/unit/virt/vmwareapi/test_network_util.py
index b5b2bd7281..b3b5bb15ea 100644
--- a/nova/tests/unit/virt/vmwareapi/test_network_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_network_util.py
@@ -15,8 +15,8 @@
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_vmware import vim_util
from nova import test
diff --git a/nova/tests/unit/virt/vmwareapi/test_session.py b/nova/tests/unit/virt/vmwareapi/test_session.py
index e4cb361764..6088e1f5b2 100644
--- a/nova/tests/unit/virt/vmwareapi/test_session.py
+++ b/nova/tests/unit/virt/vmwareapi/test_session.py
@@ -21,7 +21,7 @@
Test suite for VMwareAPI Session
"""
-import mock
+from unittest import mock
from oslo_vmware import exceptions as vexec
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
index b0fb9df47c..02d516fac7 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vif.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_vmware import vim_util
from nova import exception
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
index 92e1434f44..82fa07a882 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vm_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -15,8 +15,8 @@
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_service import fixture as oslo_svc_fixture
from oslo_utils import units
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
index 61a4c15769..19990b8b32 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vmops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -14,8 +14,8 @@
# under the License.
import time
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
index 81e86aa948..003cbb9283 100644
--- a/nova/tests/unit/virt/vmwareapi/test_volumeops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
diff --git a/nova/tests/unit/virt/zvm/test_driver.py b/nova/tests/unit/virt/zvm/test_driver.py
index 85a8a5227c..a5a129331d 100644
--- a/nova/tests/unit/virt/zvm/test_driver.py
+++ b/nova/tests/unit/virt/zvm/test_driver.py
@@ -13,8 +13,9 @@
# under the License.
import copy
-import mock
import os
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from nova.compute import provider_tree
diff --git a/nova/tests/unit/virt/zvm/test_guest.py b/nova/tests/unit/virt/zvm/test_guest.py
index 029f211ea4..c786270715 100644
--- a/nova/tests/unit/virt/zvm/test_guest.py
+++ b/nova/tests/unit/virt/zvm/test_guest.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.compute import power_state as compute_power_state
from nova import context
diff --git a/nova/tests/unit/virt/zvm/test_hypervisor.py b/nova/tests/unit/virt/zvm/test_hypervisor.py
index d2081d49e2..c816ca57f6 100644
--- a/nova/tests/unit/virt/zvm/test_hypervisor.py
+++ b/nova/tests/unit/virt/zvm/test_hypervisor.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/unit/virt/zvm/test_utils.py b/nova/tests/unit/virt/zvm/test_utils.py
index 60893759b9..77747855f4 100644
--- a/nova/tests/unit/virt/zvm/test_utils.py
+++ b/nova/tests/unit/virt/zvm/test_utils.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from zvmconnector import connector
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index f4ee7383d4..e53ebe3cb8 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -13,13 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import api_versions as cinder_api_versions
from cinderclient import exceptions as cinder_exception
from cinderclient.v3 import limits as cinder_limits
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import session
from keystoneclient import exceptions as keystone_exception
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils